author     Josh Magee <joshua_magee@playstation.sony.com>  2014-02-01 01:36:16 +0000
committer  Josh Magee <joshua_magee@playstation.sony.com>  2014-02-01 01:36:16 +0000
commit     cde5c26c465d9831546896aa5f2e81358ae05e6c (patch)
tree       50f646096ce637650f1178a128b4058856823db7
parent     8a24e835504105efdf6d882053d5da7b0e1dccd3 (diff)
[stackprotector] Implement the sspstrong rules for stack layout.
This changes the PrologueEpilogInserter and LocalStackSlotAllocation passes
to follow the extended stack layout rules for sspstrong and sspreq.

The sspstrong layout rules are:
  1. Large arrays and structures containing large arrays
     (>= ssp-buffer-size) are closest to the stack protector.
  2. Small arrays and structures containing small arrays
     (< ssp-buffer-size) are 2nd closest to the protector.
  3. Variables that have had their address taken are 3rd closest to the
     protector.

Differential Revision: http://llvm-reviews.chandlerc.com/D2546

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200601 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  docs/LangRef.rst                           25
-rw-r--r--  lib/CodeGen/LocalStackSlotAllocation.cpp   11
-rw-r--r--  lib/CodeGen/PrologEpilogInserter.cpp       11
-rw-r--r--  test/CodeGen/ARM/ssp-data-layout.ll       321
-rw-r--r--  test/CodeGen/X86/ssp-data-layout.ll       280
5 files changed, 644 insertions, 4 deletions
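
To make the grouping concrete, the following is a hypothetical C++ function
(the names, sizes, and the escape() helper are invented, and the default
ssp-buffer-size of 8 is assumed) annotated with the sspstrong group each
local would land in; group 1 is placed closest to the stack protector guard:

// Hypothetical illustration only; names, sizes, and escape() are invented.
// Assumes the default ssp-buffer-size of 8.
static void *sink;
static void escape(void *p) { sink = p; }  // keeps an address "escaped"

void example() {
  char big[16];                      // group 1: array >= ssp-buffer-size
  struct { char buf[16]; } big_s;    // group 1: struct containing a large array
  char small[4];                     // group 2: array < ssp-buffer-size
  int addr_taken = 0;                // group 3: its address is taken below
  int scalar = 0;                    // group 4: everything else

  escape(&addr_taken);               // address-of is what puts addr_taken in group 3
  escape(big);                       // arrays are still grouped by their size (1 or 2)
  escape(&big_s);
  escape(small);
  (void)scalar;                      // never address-taken, so it stays in group 4
}
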
diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index 07e94a9f84..e1880f2bc1 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -1112,6 +1112,9 @@ example:
- Calls to alloca() with variable sizes or constant sizes greater than
``ssp-buffer-size``.
+ Variables that are identified as requiring a protector will be arranged
+ on the stack such that they are adjacent to the stack protector guard.
+
If a function that has an ``ssp`` attribute is inlined into a
function that doesn't have an ``ssp`` attribute, then the resulting
function will have an ``ssp`` attribute.
@@ -1120,6 +1123,17 @@ example:
stack smashing protector. This overrides the ``ssp`` function
attribute.
+ Variables that are identified as requiring a protector will be arranged
+ on the stack such that they are adjacent to the stack protector guard.
+ The specific layout rules are:
+
+ #. Large arrays and structures containing large arrays
+ (``>= ssp-buffer-size``) are closest to the stack protector.
+ #. Small arrays and structures containing small arrays
+ (``< ssp-buffer-size``) are 2nd closest to the protector.
+ #. Variables that have had their address taken are 3rd closest to the
+ protector.
+
If a function that has an ``sspreq`` attribute is inlined into a
function that doesn't have an ``sspreq`` attribute or which has an
``ssp`` or ``sspstrong`` attribute, then the resulting function will have
@@ -1135,6 +1149,17 @@ example:
- Calls to alloca().
- Local variables that have had their address taken.
+ Variables that are identified as requiring a protector will be arranged
+ on the stack such that they are adjacent to the stack protector guard.
+ The specific layout rules are:
+
+ #. Large arrays and structures containing large arrays
+ (``>= ssp-buffer-size``) are closest to the stack protector.
+ #. Small arrays and structures containing small arrays
+ (``< ssp-buffer-size``) are 2nd closest to the protector.
+ #. Variables that have had their address taken are 3rd closest to the
+ protector.
+
This overrides the ``ssp`` function attribute.
If a function that has an ``sspstrong`` attribute is inlined into a
diff --git a/lib/CodeGen/LocalStackSlotAllocation.cpp b/lib/CodeGen/LocalStackSlotAllocation.cpp
index f521548b21..08f0cc2f01 100644
--- a/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -194,6 +194,9 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
+ StackObjSet SmallArrayObjs;
+ StackObjSet AddrOfObjs;
+
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
@@ -206,8 +209,12 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
+ continue;
case StackProtector::SSPLK_SmallArray:
+ SmallArrayObjs.insert(i);
+ continue;
case StackProtector::SSPLK_AddrOf:
+ AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
@@ -218,6 +225,10 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
+ AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
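
The PrologEpilogInserter change below applies the identical classification.
As a rough standalone sketch of the flow both passes implement (plain C++
containers standing in for MachineFrameInfo, StackProtector, and
AdjustStackOffset; not the actual LLVM APIs):

#include <functional>
#include <set>
#include <vector>

// Simplified stand-in for StackProtector::SSPLayoutKind.
enum class SSPLayoutKind { None, SmallArray, AddrOf, LargeArray };

// Toy version of the classify-then-assign flow.  Indices play the role of
// frame object indices; assignOffset stands in for AdjustStackOffset().
void layoutProtectedObjects(const std::vector<SSPLayoutKind> &Kinds,
                            const std::function<void(int)> &assignOffset) {
  std::set<int> LargeArrayObjs, SmallArrayObjs, AddrOfObjs;

  // Classification: one bucket per layout kind; SSPLK_None objects are skipped.
  for (int i = 0, e = (int)Kinds.size(); i != e; ++i) {
    switch (Kinds[i]) {
    case SSPLayoutKind::None:       continue;
    case SSPLayoutKind::SmallArray: SmallArrayObjs.insert(i); continue;
    case SSPLayoutKind::AddrOf:     AddrOfObjs.insert(i);     continue;
    case SSPLayoutKind::LargeArray: LargeArrayObjs.insert(i); continue;
    }
  }

  // Assignment in group order: large arrays first (closest to the guard),
  // then small arrays, then address-taken objects.
  for (int i : LargeArrayObjs) assignOffset(i);
  for (int i : SmallArrayObjs) assignOffset(i);
  for (int i : AddrOfObjs)     assignOffset(i);
}

Everything not placed by one of the three sets is handled afterwards by the
existing code that lays out the remaining (non-protected) stack objects,
which is why plain scalars end up furthest from the guard in the tests.
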
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index 488e7ecf8c..1215e5a5d1 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -553,6 +553,9 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
+ StackObjSet SmallArrayObjs;
+ StackObjSet AddrOfObjs;
+
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
@@ -572,8 +575,12 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
+ continue;
case StackProtector::SSPLK_SmallArray:
+ SmallArrayObjs.insert(i);
+ continue;
case StackProtector::SSPLK_AddrOf:
+ AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
@@ -584,6 +591,10 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
+ AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
index ad4195b519..e7dafac702 100644
--- a/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -165,6 +165,327 @@ entry:
ret void
}
+define void @layout_sspstrong() sspstrong {
+entry:
+; Expected stack layout for sspstrong is
+; 144 large_nonchar . Group 1, nested arrays,
+; 136 large_char . arrays >= ssp-buffer-size
+; 128 struct_large_char .
+; 96 struct_large_nonchar .
+; 84+8 small_non_char | Group 2, nested arrays,
+; 90 small_char | arrays < ssp-buffer-size
+; 88 struct_small_char |
+; 84 struct_small_nonchar |
+; 80 addrof * Group 3, addr-of local
+; 76 scalar1 + Group 4, everything else
+; 72 scalar2 +
+; 68 scalar3 +
+;
+; CHECK: layout_sspstrong:
+; r[[SP]] is used as an offset into the stack later
+; CHECK: add r[[SP:[0-9]+]], sp, #84
+
+; CHECK: bl get_scalar1
+; CHECK: str r0, [sp, #76]
+; CHECK: bl end_scalar1
+
+; CHECK: bl get_scalar2
+; CHECK: str r0, [sp, #72]
+; CHECK: bl end_scalar2
+
+; CHECK: bl get_scalar3
+; CHECK: str r0, [sp, #68]
+; CHECK: bl end_scalar3
+
+; CHECK: bl get_addrof
+; CHECK: str r0, [sp, #80]
+; CHECK: bl end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: strh r0, [r[[SP]], #8]
+; CHECK: bl end_small_nonchar
+
+; CHECK: bl get_large_nonchar
+; CHECK: str r0, [sp, #144]
+; CHECK: bl end_large_nonchar
+
+; CHECK: bl get_small_char
+; CHECK: strb r0, [sp, #90]
+; CHECK: bl end_small_char
+
+; CHECK: bl get_large_char
+; CHECK: strb r0, [sp, #136]
+; CHECK: bl end_large_char
+
+; CHECK: bl get_struct_large_char
+; CHECK: strb r0, [sp, #128]
+; CHECK: bl end_struct_large_char
+
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #88]
+; CHECK: bl end_struct_small_char
+
+; CHECK: bl get_struct_large_nonchar
+; CHECK: str r0, [sp, #96]
+; CHECK: bl end_struct_large_nonchar
+
+; CHECK: bl get_struct_small_nonchar
+; CHECK: strh r0, [r[[SP]]]
+; CHECK: bl end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspreq() sspreq {
+entry:
+; Expected stack layout for sspreq is the same as sspstrong
+;
+; CHECK: layout_sspreq:
+; r[[SP]] is used as an offset into the stack later
+; CHECK: add r[[SP:[0-9]+]], sp, #84
+
+; CHECK: bl get_scalar1
+; CHECK: str r0, [sp, #76]
+; CHECK: bl end_scalar1
+
+; CHECK: bl get_scalar2
+; CHECK: str r0, [sp, #72]
+; CHECK: bl end_scalar2
+
+; CHECK: bl get_scalar3
+; CHECK: str r0, [sp, #68]
+; CHECK: bl end_scalar3
+
+; CHECK: bl get_addrof
+; CHECK: str r0, [sp, #80]
+; CHECK: bl end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: strh r0, [r[[SP]], #8]
+; CHECK: bl end_small_nonchar
+
+; CHECK: bl get_large_nonchar
+; CHECK: str r0, [sp, #144]
+; CHECK: bl end_large_nonchar
+
+; CHECK: bl get_small_char
+; CHECK: strb r0, [sp, #90]
+; CHECK: bl end_small_char
+
+; CHECK: bl get_large_char
+; CHECK: strb r0, [sp, #136]
+; CHECK: bl end_large_char
+
+; CHECK: bl get_struct_large_char
+; CHECK: strb r0, [sp, #128]
+; CHECK: bl end_struct_large_char
+
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #88]
+; CHECK: bl end_struct_small_char
+
+; CHECK: bl get_struct_large_nonchar
+; CHECK: str r0, [sp, #96]
+; CHECK: bl end_struct_large_nonchar
+
+; CHECK: bl get_struct_small_nonchar
+; CHECK: strh r0, [r[[SP]]]
+; CHECK: bl end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @struct_with_protectable_arrays() sspstrong {
+entry:
+; Check to ensure that a structure which contains a small array followed by a
+; large array is assigned to the stack properly as a large object.
+; CHECK: struct_with_protectable_arrays:
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #68]
+; CHECK: bl end_struct_small_char
+; CHECK: bl get_struct_large_char2
+; CHECK: strb r0, [sp, #106]
+; CHECK: bl end_struct_large_char2
+ %a = alloca %struct.struct_small_char, align 1
+ %b = alloca %struct.struct_large_char2, align 1
+ %d1 = alloca %struct.struct_large_nonchar, align 8
+ %d2 = alloca %struct.struct_small_nonchar, align 2
+ %call = call signext i8 @get_struct_small_char()
+ %foo = getelementptr inbounds %struct.struct_small_char* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8]* %foo, i32 0, i64 0
+ store i8 %call, i8* %arrayidx, align 1
+ call void @end_struct_small_char()
+ %call1 = call signext i8 @get_struct_large_char2()
+ %foo2 = getelementptr inbounds %struct.struct_large_char2* %b, i32 0, i32 1
+ %arrayidx3 = getelementptr inbounds [8 x i8]* %foo2, i32 0, i64 0
+ store i8 %call1, i8* %arrayidx3, align 1
+ call void @end_struct_large_char2()
+ %0 = bitcast %struct.struct_large_char2* %b to %struct.struct_large_char*
+ %coerce.dive = getelementptr %struct.struct_large_char* %0, i32 0, i32 0
+ %1 = bitcast [8 x i8]* %coerce.dive to i64*
+ %2 = load i64* %1, align 1
+ %coerce.dive4 = getelementptr %struct.struct_small_char* %a, i32 0, i32 0
+ %3 = bitcast [2 x i8]* %coerce.dive4 to i16*
+ %4 = load i16* %3, align 1
+ %coerce.dive5 = getelementptr %struct.struct_small_nonchar* %d2, i32 0, i32 0
+ %5 = bitcast [2 x i16]* %coerce.dive5 to i32*
+ %6 = load i32* %5, align 1
+ call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 8 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
+ ret void
+}
+
declare i32 @get_scalar1()
declare void @end_scalar1()
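
The @struct_with_protectable_arrays test above pins down one subtlety of the
rules: a structure containing both a small and a large char array is
classified by the large array, so the whole object is laid out as a large
(group 1) object. A hypothetical C++ analogue, mirroring the
{ [2 x i8], [8 x i8] } shape of %struct.struct_large_char2 and again assuming
ssp-buffer-size of 8:

// Hypothetical analogue of %struct.struct_large_char2 = { [2 x i8], [8 x i8] }.
struct large_char2 {
  char small[2];   // below ssp-buffer-size on its own
  char large[8];   // at/above ssp-buffer-size
};

void mixed() {
  char just_small[2];  // group 2: small array
  large_char2 b;       // group 1: the contained 8-byte array makes the whole
                       // struct a large object, placed closest to the guard
  (void)just_small;
  (void)b;
}
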
diff --git a/test/CodeGen/X86/ssp-data-layout.ll b/test/CodeGen/X86/ssp-data-layout.ll
index 72194af96f..e76ad7b871 100644
--- a/test/CodeGen/X86/ssp-data-layout.ll
+++ b/test/CodeGen/X86/ssp-data-layout.ll
@@ -21,7 +21,6 @@
; on a non-linux target the data layout rules are triggered.
%struct.struct_large_char = type { [8 x i8] }
-%struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
%struct.struct_small_char = type { [2 x i8] }
%struct.struct_large_nonchar = type { [8 x i32] }
%struct.struct_small_nonchar = type { [2 x i16] }
@@ -170,6 +169,282 @@ entry:
ret void
}
+define void @layout_sspstrong() nounwind uwtable sspstrong {
+entry:
+; Expected stack layout for sspstrong is
+; -48 large_nonchar . Group 1, nested arrays,
+; -56 large_char . arrays >= ssp-buffer-size
+; -64 struct_large_char .
+; -96 struct_large_nonchar .
+; -100 small_non_char | Group 2, nested arrays,
+; -102 small_char | arrays < ssp-buffer-size
+; -104 struct_small_char |
+; -112 struct_small_nonchar |
+; -116 addrof * Group 3, addr-of local
+; -120 scalar + Group 4, everything else
+; -124 scalar +
+; -128 scalar +
+;
+; CHECK: layout_sspstrong:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspreq() nounwind uwtable sspreq {
+entry:
+; Expected stack layout for sspreq is the same as sspstrong
+;
+; CHECK: layout_sspreq:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
define void @fast_non_linux() ssp {
entry:
; FAST-NON-LIN: fast_non_linux:
@@ -222,9 +497,6 @@ declare void @end_large_char()
declare signext i8 @get_struct_large_char()
declare void @end_struct_large_char()
-declare signext i8 @get_struct_large_char2()
-declare void @end_struct_large_char2()
-
declare signext i8 @get_struct_small_char()
declare void @end_struct_small_char()