author      Manman Ren <manman.ren@gmail.com>   2013-08-21 22:20:53 +0000
committer   Manman Ren <manman.ren@gmail.com>   2013-08-21 22:20:53 +0000
commit      bf9d6e5c3743e873164765cbe38cc1bd10ee18ab (patch)
tree        c3f45b5fa047527d7c10bbb4b418a8ad0a5940ed /test/CodeGen
parent      23dcb187fb2a195ad7dbc35d3cee2c33edb7bfc3 (diff)
TBAA: remove !tbaa from testing cases when they are not needed.

This will make it easier to turn on struct-path aware TBAA since the metadata format will change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188944 91177308-0d34-0410-b5e6-96231b3b80d8
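For reference, below is a minimal sketch of the format change that motivates stripping these tags. The function names (@scalar_tbaa, @struct_path_tbaa) are made up for illustration, and the struct-path node layout is an assumption based on the struct-path aware TBAA design, not something taken from this commit:

; Old-style scalar TBAA, the form being removed from these tests: the access
; tag on a load or store is the scalar type node itself, and each type node
; names its parent in the type tree.
define i32 @scalar_tbaa(i32* %p) nounwind readonly {
entry:
  %v = load i32* %p, align 4, !tbaa !0
  ret i32 %v
}

!0 = metadata !{metadata !"int", metadata !1}
!1 = metadata !{metadata !"omnipotent char", metadata !2}
!2 = metadata !{metadata !"Simple C/C++ TBAA"}

; Struct-path aware TBAA (assumed layout): the access tag becomes a
; (base type, access type, offset) triple, so any !tbaa operand left in a
; test would have to be rewritten once the new format is switched on.
define i32 @struct_path_tbaa(i32* %p) nounwind readonly {
entry:
  %v = load i32* %p, align 4, !tbaa !3
  ret i32 %v
}

!3 = metadata !{metadata !4, metadata !4, i64 0}
!4 = metadata !{metadata !"int", metadata !5, i64 0}
!5 = metadata !{metadata !"omnipotent char", metadata !6, i64 0}
!6 = metadata !{metadata !"Simple C/C++ TBAA"}

Because the tests touched here no longer reference !tbaa at all, flipping the default metadata format should not require editing them again.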
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll           11
-rw-r--r--  test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll    41
-rw-r--r--  test/CodeGen/ARM/neon_spill.ll                           11
-rw-r--r--  test/CodeGen/Hexagon/combine_ir.ll                       15
-rw-r--r--  test/CodeGen/Hexagon/memops.ll                          509
-rw-r--r--  test/CodeGen/Mips/o32_cc_byval.ll                        29
-rw-r--r--  test/CodeGen/Mips/ra-allocatable.ll                     367
-rw-r--r--  test/CodeGen/PowerPC/bdzlr.ll                            11
-rw-r--r--  test/CodeGen/PowerPC/cr-spills.ll                       105
-rw-r--r--  test/CodeGen/PowerPC/optcmp.ll                           27
-rw-r--r--  test/CodeGen/PowerPC/pr15031.ll                          26
-rw-r--r--  test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll       9
-rw-r--r--  test/CodeGen/X86/2011-10-12-MachineCSE.ll                29
-rw-r--r--  test/CodeGen/X86/coalesce-implicitdef.ll                 35
-rw-r--r--  test/CodeGen/X86/pr14090.ll                              10
15 files changed, 574 insertions, 661 deletions
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index f689d4950e..bc72e126b4 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -21,8 +21,8 @@ for.body: ; preds = %_Z14printIsNotZeroi
%x = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 0
%y = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 1
%inc = add i32 %i.022, 1
- %tmp8 = load i32* %x, align 4, !tbaa !0
- %tmp11 = load i32* %y, align 4, !tbaa !0
+ %tmp8 = load i32* %x, align 4
+ %tmp11 = load i32* %y, align 4
%mul = mul nsw i32 %tmp11, %tmp8
%tobool.i14 = icmp eq i32 %mul, 0
br i1 %tobool.i14, label %_Z14printIsNotZeroi.exit17, label %if.then.i16
@@ -35,15 +35,10 @@ _Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi
_Z14printIsNotZeroi.exit17.for.body_crit_edge: ; preds = %_Z14printIsNotZeroi.exit17
%b.phi.trans.insert = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
- %tmp3.pre = load i8* %b.phi.trans.insert, align 1, !tbaa !3
+ %tmp3.pre = load i8* %b.phi.trans.insert, align 1
%phitmp27 = icmp eq i8 undef, 0
br label %for.body
for.end: ; preds = %_Z14printIsNotZeroi.exit17
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"bool", metadata !1}
diff --git a/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll b/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
index 470f2fb587..d9c9cc459c 100644
--- a/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
+++ b/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
@@ -4,7 +4,7 @@ define i32 @t1(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t1
%add.ptr = getelementptr inbounds i32* %ptr, i32 -1
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-4]
ret i32 %0
}
@@ -13,7 +13,7 @@ define i32 @t2(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t2
%add.ptr = getelementptr inbounds i32* %ptr, i32 -63
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-252]
ret i32 %0
}
@@ -22,7 +22,7 @@ define i32 @t3(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t3
%add.ptr = getelementptr inbounds i32* %ptr, i32 -64
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0]
ret i32 %0
}
@@ -31,7 +31,7 @@ define zeroext i16 @t4(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t4
%add.ptr = getelementptr inbounds i16* %ptr, i32 -1
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-2]
ret i16 %0
}
@@ -40,7 +40,7 @@ define zeroext i16 @t5(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t5
%add.ptr = getelementptr inbounds i16* %ptr, i32 -127
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-254]
ret i16 %0
}
@@ -49,7 +49,7 @@ define zeroext i16 @t6(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t6
%add.ptr = getelementptr inbounds i16* %ptr, i32 -128
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0]
ret i16 %0
}
@@ -58,7 +58,7 @@ define zeroext i8 @t7(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t7
%add.ptr = getelementptr inbounds i8* %ptr, i32 -1
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-1]
ret i8 %0
}
@@ -67,7 +67,7 @@ define zeroext i8 @t8(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t8
%add.ptr = getelementptr inbounds i8* %ptr, i32 -255
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-255]
ret i8 %0
}
@@ -76,7 +76,7 @@ define zeroext i8 @t9(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t9
%add.ptr = getelementptr inbounds i8* %ptr, i32 -256
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0]
ret i8 %0
}
@@ -85,7 +85,7 @@ define void @t10(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t10
%add.ptr = getelementptr inbounds i32* %ptr, i32 -1
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-4]
ret void
}
@@ -94,7 +94,7 @@ define void @t11(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t11
%add.ptr = getelementptr inbounds i32* %ptr, i32 -63
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-252]
ret void
}
@@ -103,7 +103,7 @@ define void @t12(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t12
%add.ptr = getelementptr inbounds i32* %ptr, i32 -64
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0]
ret void
}
@@ -112,7 +112,7 @@ define void @t13(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t13
%add.ptr = getelementptr inbounds i16* %ptr, i32 -1
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-2]
ret void
}
@@ -121,7 +121,7 @@ define void @t14(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t14
%add.ptr = getelementptr inbounds i16* %ptr, i32 -127
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-254]
ret void
}
@@ -130,7 +130,7 @@ define void @t15(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t15
%add.ptr = getelementptr inbounds i16* %ptr, i32 -128
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0]
ret void
}
@@ -139,7 +139,7 @@ define void @t16(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t16
%add.ptr = getelementptr inbounds i8* %ptr, i32 -1
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-1]
ret void
}
@@ -148,7 +148,7 @@ define void @t17(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t17
%add.ptr = getelementptr inbounds i8* %ptr, i32 -255
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-255]
ret void
}
@@ -157,12 +157,7 @@ define void @t18(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t18
%add.ptr = getelementptr inbounds i8* %ptr, i32 -256
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0]
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/ARM/neon_spill.ll b/test/CodeGen/ARM/neon_spill.ll
index 277bd05ba3..d286d16486 100644
--- a/test/CodeGen/ARM/neon_spill.ll
+++ b/test/CodeGen/ARM/neon_spill.ll
@@ -24,7 +24,7 @@ declare arm_aapcs_vfpcc %2** @func4()
define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
call void @llvm.arm.neon.vst4.v4i32(i8* undef, <4 x i32> <i32 0, i32 1065353216, i32 1073741824, i32 1077936128>, <4 x i32> <i32 1082130432, i32 1084227584, i32 1086324736, i32 1088421888>, <4 x i32> <i32 1090519040, i32 1091567616, i32 1092616192, i32 1093664768>, <4 x i32> <i32 1094713344, i32 1095761920, i32 1096810496, i32 1097859072>, i32 16) nounwind
%2 = call arm_aapcs_vfpcc %0** @func2() nounwind
- %3 = load %0** %2, align 4, !tbaa !0
+ %3 = load %0** %2, align 4
store float 0.000000e+00, float* undef, align 4
%4 = call arm_aapcs_vfpcc %2* @func3(%2* undef, %2* undef, i32 2956) nounwind
call arm_aapcs_vfpcc void @func1(%0* %3, float* undef, float* undef, %2* undef)
@@ -35,11 +35,11 @@ define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
%6 = call arm_aapcs_vfpcc %2** @func4() nounwind
%7 = call arm_aapcs_vfpcc %2* @func3(%2* undef, %2* undef, i32 2971) nounwind
%8 = fadd float undef, -1.000000e+05
- store float %8, float* undef, align 16, !tbaa !3
+ store float %8, float* undef, align 16
%9 = call arm_aapcs_vfpcc i32 @rand() nounwind
%10 = fmul float undef, 2.000000e+05
%11 = fadd float %10, -1.000000e+05
- store float %11, float* undef, align 4, !tbaa !3
+ store float %11, float* undef, align 4
call void @llvm.arm.neon.vst4.v4i32(i8* undef, <4 x i32> <i32 0, i32 1065353216, i32 1073741824, i32 1077936128>, <4 x i32> <i32 1082130432, i32 1084227584, i32 1086324736, i32 1088421888>, <4 x i32> <i32 1090519040, i32 1091567616, i32 1092616192, i32 1093664768>, <4 x i32> <i32 1094713344, i32 1095761920, i32 1096810496, i32 1097859072>, i32 16) nounwind
ret void
}
@@ -47,8 +47,3 @@ define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
declare arm_aapcs_vfpcc i32 @rand()
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"float", metadata !1}
diff --git a/test/CodeGen/Hexagon/combine_ir.ll b/test/CodeGen/Hexagon/combine_ir.ll
index 8b99ef7155..e100cf7196 100644
--- a/test/CodeGen/Hexagon/combine_ir.ll
+++ b/test/CodeGen/Hexagon/combine_ir.ll
@@ -4,7 +4,7 @@
define void @word(i32* nocapture %a) nounwind {
entry:
- %0 = load i32* %a, align 4, !tbaa !0
+ %0 = load i32* %a, align 4
%1 = zext i32 %0 to i64
tail call void @bar(i64 %1) nounwind
ret void
@@ -17,10 +17,10 @@ declare void @bar(i64)
define void @halfword(i16* nocapture %a) nounwind {
entry:
- %0 = load i16* %a, align 2, !tbaa !3
+ %0 = load i16* %a, align 2
%1 = zext i16 %0 to i64
%add.ptr = getelementptr inbounds i16* %a, i32 1
- %2 = load i16* %add.ptr, align 2, !tbaa !3
+ %2 = load i16* %add.ptr, align 2
%3 = zext i16 %2 to i64
%4 = shl nuw nsw i64 %3, 16
%ins = or i64 %4, %1
@@ -33,18 +33,13 @@ entry:
define void @byte(i8* nocapture %a) nounwind {
entry:
- %0 = load i8* %a, align 1, !tbaa !1
+ %0 = load i8* %a, align 1
%1 = zext i8 %0 to i64
%add.ptr = getelementptr inbounds i8* %a, i32 1
- %2 = load i8* %add.ptr, align 1, !tbaa !1
+ %2 = load i8* %add.ptr, align 1
%3 = zext i8 %2 to i64
%4 = shl nuw nsw i64 %3, 8
%ins = or i64 %4, %1
tail call void @bar(i64 %ins) nounwind
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/Hexagon/memops.ll b/test/CodeGen/Hexagon/memops.ll
index 5498848d85..fca1a73811 100644
--- a/test/CodeGen/Hexagon/memops.ll
+++ b/test/CodeGen/Hexagon/memops.ll
@@ -4,11 +4,11 @@
define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
@@ -16,11 +16,11 @@ define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv1 = zext i8 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %p, align 1, !tbaa !0
+ store i8 %conv2, i8* %p, align 1
ret void
}
@@ -28,51 +28,51 @@ define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv1 = zext i8 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %p, align 1, !tbaa !0
+ store i8 %conv2, i8* %p, align 1
ret void
}
define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %p, align 1, !tbaa !0
+ store i8 %or3, i8* %p, align 1
ret void
}
define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %p, align 1, !tbaa !0
+ store i8 %and3, i8* %p, align 1
ret void
}
define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
@@ -80,11 +80,11 @@ define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -93,11 +93,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -106,11 +106,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -118,9 +118,9 @@ define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %or3, i8* %add.ptr, align 1
ret void
}
@@ -128,9 +128,9 @@ define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %and3, i8* %add.ptr, align 1
ret void
}
@@ -138,11 +138,11 @@ define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -150,11 +150,11 @@ define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -162,11 +162,11 @@ define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -175,11 +175,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -188,11 +188,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -200,9 +200,9 @@ define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) noun
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %or3, i8* %add.ptr, align 1
ret void
}
@@ -210,9 +210,9 @@ define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nou
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %and3, i8* %add.ptr, align 1
ret void
}
@@ -220,11 +220,11 @@ define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -232,22 +232,22 @@ define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
@@ -255,11 +255,11 @@ define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %p, align 1, !tbaa !0
+ store i8 %conv2, i8* %p, align 1
ret void
}
@@ -267,51 +267,51 @@ define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %p, align 1, !tbaa !0
+ store i8 %conv2, i8* %p, align 1
ret void
}
define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %p, align 1, !tbaa !0
+ store i8 %or3, i8* %p, align 1
ret void
}
define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %p, align 1, !tbaa !0
+ store i8 %and3, i8* %p, align 1
ret void
}
define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i8* %p, align 1, !tbaa !0
+ %0 = load i8* %p, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %p, align 1, !tbaa !0
+ store i8 %conv1, i8* %p, align 1
ret void
}
@@ -319,11 +319,11 @@ define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -332,11 +332,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -345,11 +345,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -357,9 +357,9 @@ define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x)
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %or3, i8* %add.ptr, align 1
ret void
}
@@ -367,9 +367,9 @@ define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %and3, i8* %add.ptr, align 1
ret void
}
@@ -377,11 +377,11 @@ define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -389,11 +389,11 @@ define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -401,11 +401,11 @@ define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -414,11 +414,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -427,11 +427,11 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr, align 1
ret void
}
@@ -439,9 +439,9 @@ define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwi
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
- store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %or3, i8* %add.ptr, align 1
ret void
}
@@ -449,9 +449,9 @@ define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounw
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
- store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %and3, i8* %add.ptr, align 1
ret void
}
@@ -459,11 +459,11 @@ define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -471,22 +471,22 @@ define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
@@ -494,11 +494,11 @@ define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwin
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %p, align 2, !tbaa !2
+ store i16 %conv2, i16* %p, align 2
ret void
}
@@ -506,51 +506,51 @@ define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwin
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %p, align 2, !tbaa !2
+ store i16 %conv2, i16* %p, align 2
ret void
}
define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %p, align 2, !tbaa !2
+ store i16 %or3, i16* %p, align 2
ret void
}
define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %p, align 2, !tbaa !2
+ store i16 %and3, i16* %p, align 2
ret void
}
define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
@@ -558,11 +558,11 @@ define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -571,11 +571,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -584,11 +584,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -596,9 +596,9 @@ define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroex
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %or3, i16* %add.ptr, align 2
ret void
}
@@ -606,9 +606,9 @@ define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroe
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %and3, i16* %add.ptr, align 2
ret void
}
@@ -616,11 +616,11 @@ define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwi
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -628,11 +628,11 @@ define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwi
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -640,11 +640,11 @@ define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -653,11 +653,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -666,11 +666,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -678,9 +678,9 @@ define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) n
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %or3, i16* %add.ptr, align 2
ret void
}
@@ -688,9 +688,9 @@ define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x)
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %and3, i16* %add.ptr, align 2
ret void
}
@@ -698,11 +698,11 @@ define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -710,22 +710,22 @@ define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
@@ -733,11 +733,11 @@ define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %p, align 2, !tbaa !2
+ store i16 %conv2, i16* %p, align 2
ret void
}
@@ -745,51 +745,51 @@ define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %p, align 2, !tbaa !2
+ store i16 %conv2, i16* %p, align 2
ret void
}
define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %p, align 2, !tbaa !2
+ store i16 %or3, i16* %p, align 2
ret void
}
define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %p, align 2, !tbaa !2
+ store i16 %and3, i16* %p, align 2
ret void
}
define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i16* %p, align 2, !tbaa !2
+ %0 = load i16* %p, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %p, align 2, !tbaa !2
+ store i16 %conv1, i16* %p, align 2
ret void
}
@@ -797,11 +797,11 @@ define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -810,11 +810,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -823,11 +823,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -835,9 +835,9 @@ define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %or3, i16* %add.ptr, align 2
ret void
}
@@ -845,9 +845,9 @@ define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %and3, i16* %add.ptr, align 2
ret void
}
@@ -855,11 +855,11 @@ define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -867,11 +867,11 @@ define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -879,11 +879,11 @@ define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -892,11 +892,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -905,11 +905,11 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv2, i16* %add.ptr, align 2
ret void
}
@@ -917,9 +917,9 @@ define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nou
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
- store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %or3, i16* %add.ptr, align 2
ret void
}
@@ -927,9 +927,9 @@ define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) no
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
- store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %and3, i16* %add.ptr, align 2
ret void
}
@@ -937,11 +937,11 @@ define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -949,74 +949,74 @@ define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%add = add i32 %0, 5
- store i32 %add, i32* %p, align 4, !tbaa !3
+ store i32 %add, i32* %p, align 4
ret void
}
define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%add = add i32 %0, %x
- store i32 %add, i32* %p, align 4, !tbaa !3
+ store i32 %add, i32* %p, align 4
ret void
}
define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%sub = sub i32 %0, %x
- store i32 %sub, i32* %p, align 4, !tbaa !3
+ store i32 %sub, i32* %p, align 4
ret void
}
define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %p, align 4, !tbaa !3
+ store i32 %or, i32* %p, align 4
ret void
}
define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %p, align 4, !tbaa !3
+ store i32 %and, i32* %p, align 4
ret void
}
define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %p, align 4, !tbaa !3
+ store i32 %and, i32* %p, align 4
ret void
}
define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %p, align 4, !tbaa !3
+ store i32 %or, i32* %p, align 4
ret void
}
@@ -1024,9 +1024,9 @@ define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add i32 %0, 5
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1034,9 +1034,9 @@ define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add i32 %0, %x
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1044,9 +1044,9 @@ define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%sub = sub i32 %0, %x
- store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %sub, i32* %add.ptr, align 4
ret void
}
@@ -1054,9 +1054,9 @@ define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwi
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1064,9 +1064,9 @@ define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1074,9 +1074,9 @@ define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1084,9 +1084,9 @@ define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1094,9 +1094,9 @@ define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add i32 %0, 5
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1104,9 +1104,9 @@ define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add i32 %0, %x
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1114,9 +1114,9 @@ define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%sub = sub i32 %0, %x
- store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %sub, i32* %add.ptr, align 4
ret void
}
@@ -1124,9 +1124,9 @@ define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1134,9 +1134,9 @@ define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1144,9 +1144,9 @@ define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1154,72 +1154,72 @@ define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%add = add nsw i32 %0, 5
- store i32 %add, i32* %p, align 4, !tbaa !3
+ store i32 %add, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%add = add nsw i32 %0, %x
- store i32 %add, i32* %p, align 4, !tbaa !3
+ store i32 %add, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%sub = sub nsw i32 %0, %x
- store i32 %sub, i32* %p, align 4, !tbaa !3
+ store i32 %sub, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %p, align 4, !tbaa !3
+ store i32 %or, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %p, align 4, !tbaa !3
+ store i32 %and, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %p, align 4, !tbaa !3
+ store i32 %and, i32* %p, align 4
ret void
}
define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i32* %p, align 4, !tbaa !3
+ %0 = load i32* %p, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %p, align 4, !tbaa !3
+ store i32 %or, i32* %p, align 4
ret void
}
@@ -1227,9 +1227,9 @@ define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1237,9 +1237,9 @@ define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1247,9 +1247,9 @@ define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
- store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %sub, i32* %add.ptr, align 4
ret void
}
@@ -1257,9 +1257,9 @@ define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) noun
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1267,9 +1267,9 @@ define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1277,9 +1277,9 @@ define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1287,9 +1287,9 @@ define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1297,9 +1297,9 @@ define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1307,9 +1307,9 @@ define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
- store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %add, i32* %add.ptr, align 4
ret void
}
@@ -1317,9 +1317,9 @@ define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
- store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %sub, i32* %add.ptr, align 4
ret void
}
@@ -1327,9 +1327,9 @@ define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
@@ -1337,9 +1337,9 @@ define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1347,9 +1347,9 @@ define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
- store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %and, i32* %add.ptr, align 4
ret void
}
@@ -1357,13 +1357,8 @@ define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
- store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ store i32 %or, i32* %add.ptr, align 4
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
-!2 = metadata !{metadata !"short", metadata !0}
-!3 = metadata !{metadata !"int", metadata !0}
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index 0a8f85f482..e17830d33a 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -61,17 +61,17 @@ entry:
; CHECK: mfc1 $6, $f[[F0]]
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp = load i32* %i2, align 4, !tbaa !0
+ %tmp = load i32* %i2, align 4
%d = getelementptr inbounds %struct.S1* %s1, i32 0, i32 4
- %tmp1 = load double* %d, align 8, !tbaa !3
+ %tmp1 = load double* %d, align 8
%ll = getelementptr inbounds %struct.S1* %s1, i32 0, i32 3
- %tmp2 = load i64* %ll, align 8, !tbaa !4
+ %tmp2 = load i64* %ll, align 8
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp3 = load i32* %i, align 4, !tbaa !0
+ %tmp3 = load i32* %i, align 4
%s = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1
- %tmp4 = load i16* %s, align 2, !tbaa !5
+ %tmp4 = load i16* %s, align 2
%c = getelementptr inbounds %struct.S1* %s1, i32 0, i32 0
- %tmp5 = load i8* %c, align 1, !tbaa !1
+ %tmp5 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
ret void
}
@@ -90,9 +90,9 @@ entry:
; CHECK: sw $[[R0]], 24($sp)
%arrayidx = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 0
- %tmp = load i32* %arrayidx, align 4, !tbaa !0
+ %tmp = load i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 3
- %tmp3 = load i32* %arrayidx2, align 4, !tbaa !0
+ %tmp3 = load i32* %arrayidx2, align 4
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
ret void
}
@@ -110,11 +110,11 @@ entry:
; CHECK: sw $[[R1]], 24($sp)
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp = load i32* %i, align 4, !tbaa !0
+ %tmp = load i32* %i, align 4
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp1 = load i32* %i2, align 4, !tbaa !0
+ %tmp1 = load i32* %i2, align 4
%c = getelementptr inbounds %struct.S3* %s3, i32 0, i32 0
- %tmp2 = load i8* %c, align 1, !tbaa !1
+ %tmp2 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
ret void
}
@@ -128,10 +128,3 @@ entry:
}
declare void @f6(%struct.S4* nocapture byval, i64)
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"double", metadata !1}
-!4 = metadata !{metadata !"long long", metadata !1}
-!5 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/Mips/ra-allocatable.ll b/test/CodeGen/Mips/ra-allocatable.ll
index 7621788682..afc5cb0c25 100644
--- a/test/CodeGen/Mips/ra-allocatable.ll
+++ b/test/CodeGen/Mips/ra-allocatable.ll
@@ -98,191 +98,186 @@ entry:
; CHECK: lw $ra, {{[0-9]+}}($sp) # 4-byte Folded Reload
; CHECK: jr $ra
- %0 = load i32* @a0, align 4, !tbaa !0
- %1 = load i32** @b0, align 4, !tbaa !3
- store i32 %0, i32* %1, align 4, !tbaa !0
- %2 = load i32* @a1, align 4, !tbaa !0
- %3 = load i32** @b1, align 4, !tbaa !3
- store i32 %2, i32* %3, align 4, !tbaa !0
- %4 = load i32* @a2, align 4, !tbaa !0
- %5 = load i32** @b2, align 4, !tbaa !3
- store i32 %4, i32* %5, align 4, !tbaa !0
- %6 = load i32* @a3, align 4, !tbaa !0
- %7 = load i32** @b3, align 4, !tbaa !3
- store i32 %6, i32* %7, align 4, !tbaa !0
- %8 = load i32* @a4, align 4, !tbaa !0
- %9 = load i32** @b4, align 4, !tbaa !3
- store i32 %8, i32* %9, align 4, !tbaa !0
- %10 = load i32* @a5, align 4, !tbaa !0
- %11 = load i32** @b5, align 4, !tbaa !3
- store i32 %10, i32* %11, align 4, !tbaa !0
- %12 = load i32* @a6, align 4, !tbaa !0
- %13 = load i32** @b6, align 4, !tbaa !3
- store i32 %12, i32* %13, align 4, !tbaa !0
- %14 = load i32* @a7, align 4, !tbaa !0
- %15 = load i32** @b7, align 4, !tbaa !3
- store i32 %14, i32* %15, align 4, !tbaa !0
- %16 = load i32* @a8, align 4, !tbaa !0
- %17 = load i32** @b8, align 4, !tbaa !3
- store i32 %16, i32* %17, align 4, !tbaa !0
- %18 = load i32* @a9, align 4, !tbaa !0
- %19 = load i32** @b9, align 4, !tbaa !3
- store i32 %18, i32* %19, align 4, !tbaa !0
- %20 = load i32* @a10, align 4, !tbaa !0
- %21 = load i32** @b10, align 4, !tbaa !3
- store i32 %20, i32* %21, align 4, !tbaa !0
- %22 = load i32* @a11, align 4, !tbaa !0
- %23 = load i32** @b11, align 4, !tbaa !3
- store i32 %22, i32* %23, align 4, !tbaa !0
- %24 = load i32* @a12, align 4, !tbaa !0
- %25 = load i32** @b12, align 4, !tbaa !3
- store i32 %24, i32* %25, align 4, !tbaa !0
- %26 = load i32* @a13, align 4, !tbaa !0
- %27 = load i32** @b13, align 4, !tbaa !3
- store i32 %26, i32* %27, align 4, !tbaa !0
- %28 = load i32* @a14, align 4, !tbaa !0
- %29 = load i32** @b14, align 4, !tbaa !3
- store i32 %28, i32* %29, align 4, !tbaa !0
- %30 = load i32* @a15, align 4, !tbaa !0
- %31 = load i32** @b15, align 4, !tbaa !3
- store i32 %30, i32* %31, align 4, !tbaa !0
- %32 = load i32* @a16, align 4, !tbaa !0
- %33 = load i32** @b16, align 4, !tbaa !3
- store i32 %32, i32* %33, align 4, !tbaa !0
- %34 = load i32* @a17, align 4, !tbaa !0
- %35 = load i32** @b17, align 4, !tbaa !3
- store i32 %34, i32* %35, align 4, !tbaa !0
- %36 = load i32* @a18, align 4, !tbaa !0
- %37 = load i32** @b18, align 4, !tbaa !3
- store i32 %36, i32* %37, align 4, !tbaa !0
- %38 = load i32* @a19, align 4, !tbaa !0
- %39 = load i32** @b19, align 4, !tbaa !3
- store i32 %38, i32* %39, align 4, !tbaa !0
- %40 = load i32* @a20, align 4, !tbaa !0
- %41 = load i32** @b20, align 4, !tbaa !3
- store i32 %40, i32* %41, align 4, !tbaa !0
- %42 = load i32* @a21, align 4, !tbaa !0
- %43 = load i32** @b21, align 4, !tbaa !3
- store i32 %42, i32* %43, align 4, !tbaa !0
- %44 = load i32* @a22, align 4, !tbaa !0
- %45 = load i32** @b22, align 4, !tbaa !3
- store i32 %44, i32* %45, align 4, !tbaa !0
- %46 = load i32* @a23, align 4, !tbaa !0
- %47 = load i32** @b23, align 4, !tbaa !3
- store i32 %46, i32* %47, align 4, !tbaa !0
- %48 = load i32* @a24, align 4, !tbaa !0
- %49 = load i32** @b24, align 4, !tbaa !3
- store i32 %48, i32* %49, align 4, !tbaa !0
- %50 = load i32* @a25, align 4, !tbaa !0
- %51 = load i32** @b25, align 4, !tbaa !3
- store i32 %50, i32* %51, align 4, !tbaa !0
- %52 = load i32* @a26, align 4, !tbaa !0
- %53 = load i32** @b26, align 4, !tbaa !3
- store i32 %52, i32* %53, align 4, !tbaa !0
- %54 = load i32* @a27, align 4, !tbaa !0
- %55 = load i32** @b27, align 4, !tbaa !3
- store i32 %54, i32* %55, align 4, !tbaa !0
- %56 = load i32* @a28, align 4, !tbaa !0
- %57 = load i32** @b28, align 4, !tbaa !3
- store i32 %56, i32* %57, align 4, !tbaa !0
- %58 = load i32* @a29, align 4, !tbaa !0
- %59 = load i32** @b29, align 4, !tbaa !3
- store i32 %58, i32* %59, align 4, !tbaa !0
- %60 = load i32* @a0, align 4, !tbaa !0
- %61 = load i32** @c0, align 4, !tbaa !3
- store i32 %60, i32* %61, align 4, !tbaa !0
- %62 = load i32* @a1, align 4, !tbaa !0
- %63 = load i32** @c1, align 4, !tbaa !3
- store i32 %62, i32* %63, align 4, !tbaa !0
- %64 = load i32* @a2, align 4, !tbaa !0
- %65 = load i32** @c2, align 4, !tbaa !3
- store i32 %64, i32* %65, align 4, !tbaa !0
- %66 = load i32* @a3, align 4, !tbaa !0
- %67 = load i32** @c3, align 4, !tbaa !3
- store i32 %66, i32* %67, align 4, !tbaa !0
- %68 = load i32* @a4, align 4, !tbaa !0
- %69 = load i32** @c4, align 4, !tbaa !3
- store i32 %68, i32* %69, align 4, !tbaa !0
- %70 = load i32* @a5, align 4, !tbaa !0
- %71 = load i32** @c5, align 4, !tbaa !3
- store i32 %70, i32* %71, align 4, !tbaa !0
- %72 = load i32* @a6, align 4, !tbaa !0
- %73 = load i32** @c6, align 4, !tbaa !3
- store i32 %72, i32* %73, align 4, !tbaa !0
- %74 = load i32* @a7, align 4, !tbaa !0
- %75 = load i32** @c7, align 4, !tbaa !3
- store i32 %74, i32* %75, align 4, !tbaa !0
- %76 = load i32* @a8, align 4, !tbaa !0
- %77 = load i32** @c8, align 4, !tbaa !3
- store i32 %76, i32* %77, align 4, !tbaa !0
- %78 = load i32* @a9, align 4, !tbaa !0
- %79 = load i32** @c9, align 4, !tbaa !3
- store i32 %78, i32* %79, align 4, !tbaa !0
- %80 = load i32* @a10, align 4, !tbaa !0
- %81 = load i32** @c10, align 4, !tbaa !3
- store i32 %80, i32* %81, align 4, !tbaa !0
- %82 = load i32* @a11, align 4, !tbaa !0
- %83 = load i32** @c11, align 4, !tbaa !3
- store i32 %82, i32* %83, align 4, !tbaa !0
- %84 = load i32* @a12, align 4, !tbaa !0
- %85 = load i32** @c12, align 4, !tbaa !3
- store i32 %84, i32* %85, align 4, !tbaa !0
- %86 = load i32* @a13, align 4, !tbaa !0
- %87 = load i32** @c13, align 4, !tbaa !3
- store i32 %86, i32* %87, align 4, !tbaa !0
- %88 = load i32* @a14, align 4, !tbaa !0
- %89 = load i32** @c14, align 4, !tbaa !3
- store i32 %88, i32* %89, align 4, !tbaa !0
- %90 = load i32* @a15, align 4, !tbaa !0
- %91 = load i32** @c15, align 4, !tbaa !3
- store i32 %90, i32* %91, align 4, !tbaa !0
- %92 = load i32* @a16, align 4, !tbaa !0
- %93 = load i32** @c16, align 4, !tbaa !3
- store i32 %92, i32* %93, align 4, !tbaa !0
- %94 = load i32* @a17, align 4, !tbaa !0
- %95 = load i32** @c17, align 4, !tbaa !3
- store i32 %94, i32* %95, align 4, !tbaa !0
- %96 = load i32* @a18, align 4, !tbaa !0
- %97 = load i32** @c18, align 4, !tbaa !3
- store i32 %96, i32* %97, align 4, !tbaa !0
- %98 = load i32* @a19, align 4, !tbaa !0
- %99 = load i32** @c19, align 4, !tbaa !3
- store i32 %98, i32* %99, align 4, !tbaa !0
- %100 = load i32* @a20, align 4, !tbaa !0
- %101 = load i32** @c20, align 4, !tbaa !3
- store i32 %100, i32* %101, align 4, !tbaa !0
- %102 = load i32* @a21, align 4, !tbaa !0
- %103 = load i32** @c21, align 4, !tbaa !3
- store i32 %102, i32* %103, align 4, !tbaa !0
- %104 = load i32* @a22, align 4, !tbaa !0
- %105 = load i32** @c22, align 4, !tbaa !3
- store i32 %104, i32* %105, align 4, !tbaa !0
- %106 = load i32* @a23, align 4, !tbaa !0
- %107 = load i32** @c23, align 4, !tbaa !3
- store i32 %106, i32* %107, align 4, !tbaa !0
- %108 = load i32* @a24, align 4, !tbaa !0
- %109 = load i32** @c24, align 4, !tbaa !3
- store i32 %108, i32* %109, align 4, !tbaa !0
- %110 = load i32* @a25, align 4, !tbaa !0
- %111 = load i32** @c25, align 4, !tbaa !3
- store i32 %110, i32* %111, align 4, !tbaa !0
- %112 = load i32* @a26, align 4, !tbaa !0
- %113 = load i32** @c26, align 4, !tbaa !3
- store i32 %112, i32* %113, align 4, !tbaa !0
- %114 = load i32* @a27, align 4, !tbaa !0
- %115 = load i32** @c27, align 4, !tbaa !3
- store i32 %114, i32* %115, align 4, !tbaa !0
- %116 = load i32* @a28, align 4, !tbaa !0
- %117 = load i32** @c28, align 4, !tbaa !3
- store i32 %116, i32* %117, align 4, !tbaa !0
- %118 = load i32* @a29, align 4, !tbaa !0
- %119 = load i32** @c29, align 4, !tbaa !3
- store i32 %118, i32* %119, align 4, !tbaa !0
- %120 = load i32* @a0, align 4, !tbaa !0
+ %0 = load i32* @a0, align 4
+ %1 = load i32** @b0, align 4
+ store i32 %0, i32* %1, align 4
+ %2 = load i32* @a1, align 4
+ %3 = load i32** @b1, align 4
+ store i32 %2, i32* %3, align 4
+ %4 = load i32* @a2, align 4
+ %5 = load i32** @b2, align 4
+ store i32 %4, i32* %5, align 4
+ %6 = load i32* @a3, align 4
+ %7 = load i32** @b3, align 4
+ store i32 %6, i32* %7, align 4
+ %8 = load i32* @a4, align 4
+ %9 = load i32** @b4, align 4
+ store i32 %8, i32* %9, align 4
+ %10 = load i32* @a5, align 4
+ %11 = load i32** @b5, align 4
+ store i32 %10, i32* %11, align 4
+ %12 = load i32* @a6, align 4
+ %13 = load i32** @b6, align 4
+ store i32 %12, i32* %13, align 4
+ %14 = load i32* @a7, align 4
+ %15 = load i32** @b7, align 4
+ store i32 %14, i32* %15, align 4
+ %16 = load i32* @a8, align 4
+ %17 = load i32** @b8, align 4
+ store i32 %16, i32* %17, align 4
+ %18 = load i32* @a9, align 4
+ %19 = load i32** @b9, align 4
+ store i32 %18, i32* %19, align 4
+ %20 = load i32* @a10, align 4
+ %21 = load i32** @b10, align 4
+ store i32 %20, i32* %21, align 4
+ %22 = load i32* @a11, align 4
+ %23 = load i32** @b11, align 4
+ store i32 %22, i32* %23, align 4
+ %24 = load i32* @a12, align 4
+ %25 = load i32** @b12, align 4
+ store i32 %24, i32* %25, align 4
+ %26 = load i32* @a13, align 4
+ %27 = load i32** @b13, align 4
+ store i32 %26, i32* %27, align 4
+ %28 = load i32* @a14, align 4
+ %29 = load i32** @b14, align 4
+ store i32 %28, i32* %29, align 4
+ %30 = load i32* @a15, align 4
+ %31 = load i32** @b15, align 4
+ store i32 %30, i32* %31, align 4
+ %32 = load i32* @a16, align 4
+ %33 = load i32** @b16, align 4
+ store i32 %32, i32* %33, align 4
+ %34 = load i32* @a17, align 4
+ %35 = load i32** @b17, align 4
+ store i32 %34, i32* %35, align 4
+ %36 = load i32* @a18, align 4
+ %37 = load i32** @b18, align 4
+ store i32 %36, i32* %37, align 4
+ %38 = load i32* @a19, align 4
+ %39 = load i32** @b19, align 4
+ store i32 %38, i32* %39, align 4
+ %40 = load i32* @a20, align 4
+ %41 = load i32** @b20, align 4
+ store i32 %40, i32* %41, align 4
+ %42 = load i32* @a21, align 4
+ %43 = load i32** @b21, align 4
+ store i32 %42, i32* %43, align 4
+ %44 = load i32* @a22, align 4
+ %45 = load i32** @b22, align 4
+ store i32 %44, i32* %45, align 4
+ %46 = load i32* @a23, align 4
+ %47 = load i32** @b23, align 4
+ store i32 %46, i32* %47, align 4
+ %48 = load i32* @a24, align 4
+ %49 = load i32** @b24, align 4
+ store i32 %48, i32* %49, align 4
+ %50 = load i32* @a25, align 4
+ %51 = load i32** @b25, align 4
+ store i32 %50, i32* %51, align 4
+ %52 = load i32* @a26, align 4
+ %53 = load i32** @b26, align 4
+ store i32 %52, i32* %53, align 4
+ %54 = load i32* @a27, align 4
+ %55 = load i32** @b27, align 4
+ store i32 %54, i32* %55, align 4
+ %56 = load i32* @a28, align 4
+ %57 = load i32** @b28, align 4
+ store i32 %56, i32* %57, align 4
+ %58 = load i32* @a29, align 4
+ %59 = load i32** @b29, align 4
+ store i32 %58, i32* %59, align 4
+ %60 = load i32* @a0, align 4
+ %61 = load i32** @c0, align 4
+ store i32 %60, i32* %61, align 4
+ %62 = load i32* @a1, align 4
+ %63 = load i32** @c1, align 4
+ store i32 %62, i32* %63, align 4
+ %64 = load i32* @a2, align 4
+ %65 = load i32** @c2, align 4
+ store i32 %64, i32* %65, align 4
+ %66 = load i32* @a3, align 4
+ %67 = load i32** @c3, align 4
+ store i32 %66, i32* %67, align 4
+ %68 = load i32* @a4, align 4
+ %69 = load i32** @c4, align 4
+ store i32 %68, i32* %69, align 4
+ %70 = load i32* @a5, align 4
+ %71 = load i32** @c5, align 4
+ store i32 %70, i32* %71, align 4
+ %72 = load i32* @a6, align 4
+ %73 = load i32** @c6, align 4
+ store i32 %72, i32* %73, align 4
+ %74 = load i32* @a7, align 4
+ %75 = load i32** @c7, align 4
+ store i32 %74, i32* %75, align 4
+ %76 = load i32* @a8, align 4
+ %77 = load i32** @c8, align 4
+ store i32 %76, i32* %77, align 4
+ %78 = load i32* @a9, align 4
+ %79 = load i32** @c9, align 4
+ store i32 %78, i32* %79, align 4
+ %80 = load i32* @a10, align 4
+ %81 = load i32** @c10, align 4
+ store i32 %80, i32* %81, align 4
+ %82 = load i32* @a11, align 4
+ %83 = load i32** @c11, align 4
+ store i32 %82, i32* %83, align 4
+ %84 = load i32* @a12, align 4
+ %85 = load i32** @c12, align 4
+ store i32 %84, i32* %85, align 4
+ %86 = load i32* @a13, align 4
+ %87 = load i32** @c13, align 4
+ store i32 %86, i32* %87, align 4
+ %88 = load i32* @a14, align 4
+ %89 = load i32** @c14, align 4
+ store i32 %88, i32* %89, align 4
+ %90 = load i32* @a15, align 4
+ %91 = load i32** @c15, align 4
+ store i32 %90, i32* %91, align 4
+ %92 = load i32* @a16, align 4
+ %93 = load i32** @c16, align 4
+ store i32 %92, i32* %93, align 4
+ %94 = load i32* @a17, align 4
+ %95 = load i32** @c17, align 4
+ store i32 %94, i32* %95, align 4
+ %96 = load i32* @a18, align 4
+ %97 = load i32** @c18, align 4
+ store i32 %96, i32* %97, align 4
+ %98 = load i32* @a19, align 4
+ %99 = load i32** @c19, align 4
+ store i32 %98, i32* %99, align 4
+ %100 = load i32* @a20, align 4
+ %101 = load i32** @c20, align 4
+ store i32 %100, i32* %101, align 4
+ %102 = load i32* @a21, align 4
+ %103 = load i32** @c21, align 4
+ store i32 %102, i32* %103, align 4
+ %104 = load i32* @a22, align 4
+ %105 = load i32** @c22, align 4
+ store i32 %104, i32* %105, align 4
+ %106 = load i32* @a23, align 4
+ %107 = load i32** @c23, align 4
+ store i32 %106, i32* %107, align 4
+ %108 = load i32* @a24, align 4
+ %109 = load i32** @c24, align 4
+ store i32 %108, i32* %109, align 4
+ %110 = load i32* @a25, align 4
+ %111 = load i32** @c25, align 4
+ store i32 %110, i32* %111, align 4
+ %112 = load i32* @a26, align 4
+ %113 = load i32** @c26, align 4
+ store i32 %112, i32* %113, align 4
+ %114 = load i32* @a27, align 4
+ %115 = load i32** @c27, align 4
+ store i32 %114, i32* %115, align 4
+ %116 = load i32* @a28, align 4
+ %117 = load i32** @c28, align 4
+ store i32 %116, i32* %117, align 4
+ %118 = load i32* @a29, align 4
+ %119 = load i32** @c29, align 4
+ store i32 %118, i32* %119, align 4
+ %120 = load i32* @a0, align 4
ret i32 %120
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/PowerPC/bdzlr.ll b/test/CodeGen/PowerPC/bdzlr.ll
index 656a85860d..e487558e94 100644
--- a/test/CodeGen/PowerPC/bdzlr.ll
+++ b/test/CodeGen/PowerPC/bdzlr.ll
@@ -35,15 +35,15 @@ for.body: ; preds = %for.body.for.body_c
%0 = phi %struct.lua_TValue.17.692* [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body.for.body_crit_edge ]
%tt = getelementptr inbounds %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
- %1 = load i32* %tt, align 4, !tbaa !0
- store i32 %1, i32* undef, align 4, !tbaa !0
+ %1 = load i32* %tt, align 4
+ store i32 %1, i32* undef, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge
for.body.for.body_crit_edge: ; preds = %for.body
- %.pre = load %struct.lua_TValue.17.692** undef, align 8, !tbaa !3
+ %.pre = load %struct.lua_TValue.17.692** undef, align 8
br label %for.body
for.end: ; preds = %for.body, %if.end, %entry
@@ -57,8 +57,3 @@ for.end: ; preds = %for.body, %if.end,
}
attributes #0 = { nounwind }
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/PowerPC/cr-spills.ll b/test/CodeGen/PowerPC/cr-spills.ll
index d6df7a2376..bb637fe850 100644
--- a/test/CodeGen/PowerPC/cr-spills.ll
+++ b/test/CodeGen/PowerPC/cr-spills.ll
@@ -53,11 +53,11 @@ for.cond286.preheader: ; preds = %for.body252
for.cond290.preheader: ; preds = %for.end520, %for.cond286.preheader
%srcptr.31595 = phi i16* [ getelementptr inbounds ([768 x i16]* @SetupFastFullPelSearch.orig_pels, i64 0, i64 0), %for.cond286.preheader ], [ null, %for.end520 ]
- %1 = load i32* undef, align 4, !tbaa !0
- %2 = load i32* @weight_luma, align 4, !tbaa !0
- %3 = load i32* @wp_luma_round, align 4, !tbaa !0
- %4 = load i32* @luma_log_weight_denom, align 4, !tbaa !0
- %5 = load i32* @offset_luma, align 4, !tbaa !0
+ %1 = load i32* undef, align 4
+ %2 = load i32* @weight_luma, align 4
+ %3 = load i32* @wp_luma_round, align 4
+ %4 = load i32* @luma_log_weight_denom, align 4
+ %5 = load i32* @offset_luma, align 4
%incdec.ptr502.sum = add i64 undef, 16
br label %for.body293
@@ -68,7 +68,7 @@ for.body293: ; preds = %for.body293, %for.c
%LineSadBlk1.01587 = phi i32 [ 0, %for.cond290.preheader ], [ %add402, %for.body293 ]
%LineSadBlk3.01586 = phi i32 [ 0, %for.cond290.preheader ], [ %add514, %for.body293 ]
%LineSadBlk2.01585 = phi i32 [ 0, %for.cond290.preheader ], [ %add458, %for.body293 ]
- %6 = load i16* %refptr.11590, align 2, !tbaa !3
+ %6 = load i16* %refptr.11590, align 2
%conv294 = zext i16 %6 to i32
%mul295 = mul nsw i32 %conv294, %2
%add296 = add nsw i32 %mul295, %3
@@ -78,16 +78,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1514 = select i1 %cmp.i.i1513, i32 %add297, i32 0
%cmp.i4.i1515 = icmp slt i32 %cond.i.i1514, %1
%cond.i5.i1516 = select i1 %cmp.i4.i1515, i32 %cond.i.i1514, i32 %1
- %7 = load i16* %srcptr.41591, align 2, !tbaa !3
+ %7 = load i16* %srcptr.41591, align 2
%conv300 = zext i16 %7 to i32
%sub301 = sub nsw i32 %cond.i5.i1516, %conv300
%idxprom302 = sext i32 %sub301 to i64
%arrayidx303 = getelementptr inbounds i32* %cond, i64 %idxprom302
- %8 = load i32* %arrayidx303, align 4, !tbaa !0
+ %8 = load i32* %arrayidx303, align 4
%add304 = add nsw i32 %8, %LineSadBlk0.01588
- %9 = load i32* undef, align 4, !tbaa !0
+ %9 = load i32* undef, align 4
%add318 = add nsw i32 %add304, %9
- %10 = load i16* undef, align 2, !tbaa !3
+ %10 = load i16* undef, align 2
%conv321 = zext i16 %10 to i32
%mul322 = mul nsw i32 %conv321, %2
%add323 = add nsw i32 %mul322, %3
@@ -100,22 +100,22 @@ for.body293: ; preds = %for.body293, %for.c
%sub329 = sub nsw i32 %cond.i5.i1508, 0
%idxprom330 = sext i32 %sub329 to i64
%arrayidx331 = getelementptr inbounds i32* %cond, i64 %idxprom330
- %11 = load i32* %arrayidx331, align 4, !tbaa !0
+ %11 = load i32* %arrayidx331, align 4
%add332 = add nsw i32 %add318, %11
%cmp.i.i1501 = icmp sgt i32 undef, 0
%cond.i.i1502 = select i1 %cmp.i.i1501, i32 undef, i32 0
%cmp.i4.i1503 = icmp slt i32 %cond.i.i1502, %1
%cond.i5.i1504 = select i1 %cmp.i4.i1503, i32 %cond.i.i1502, i32 %1
%incdec.ptr341 = getelementptr inbounds i16* %srcptr.41591, i64 4
- %12 = load i16* null, align 2, !tbaa !3
+ %12 = load i16* null, align 2
%conv342 = zext i16 %12 to i32
%sub343 = sub nsw i32 %cond.i5.i1504, %conv342
%idxprom344 = sext i32 %sub343 to i64
%arrayidx345 = getelementptr inbounds i32* %cond, i64 %idxprom344
- %13 = load i32* %arrayidx345, align 4, !tbaa !0
+ %13 = load i32* %arrayidx345, align 4
%add346 = add nsw i32 %add332, %13
%incdec.ptr348 = getelementptr inbounds i16* %refptr.11590, i64 5
- %14 = load i16* null, align 2, !tbaa !3
+ %14 = load i16* null, align 2
%conv349 = zext i16 %14 to i32
%mul350 = mul nsw i32 %conv349, %2
%add351 = add nsw i32 %mul350, %3
@@ -126,15 +126,15 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1499 = icmp slt i32 %cond.i.i1498, %1
%cond.i5.i1500 = select i1 %cmp.i4.i1499, i32 %cond.i.i1498, i32 %1
%incdec.ptr355 = getelementptr inbounds i16* %srcptr.41591, i64 5
- %15 = load i16* %incdec.ptr341, align 2, !tbaa !3
+ %15 = load i16* %incdec.ptr341, align 2
%conv356 = zext i16 %15 to i32
%sub357 = sub nsw i32 %cond.i5.i1500, %conv356
%idxprom358 = sext i32 %sub357 to i64
%arrayidx359 = getelementptr inbounds i32* %cond, i64 %idxprom358
- %16 = load i32* %arrayidx359, align 4, !tbaa !0
+ %16 = load i32* %arrayidx359, align 4
%add360 = add nsw i32 %16, %LineSadBlk1.01587
%incdec.ptr362 = getelementptr inbounds i16* %refptr.11590, i64 6
- %17 = load i16* %incdec.ptr348, align 2, !tbaa !3
+ %17 = load i16* %incdec.ptr348, align 2
%conv363 = zext i16 %17 to i32
%mul364 = mul nsw i32 %conv363, %2
%add365 = add nsw i32 %mul364, %3
@@ -145,15 +145,15 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1495 = icmp slt i32 %cond.i.i1494, %1
%cond.i5.i1496 = select i1 %cmp.i4.i1495, i32 %cond.i.i1494, i32 %1
%incdec.ptr369 = getelementptr inbounds i16* %srcptr.41591, i64 6
- %18 = load i16* %incdec.ptr355, align 2, !tbaa !3
+ %18 = load i16* %incdec.ptr355, align 2
%conv370 = zext i16 %18 to i32
%sub371 = sub nsw i32 %cond.i5.i1496, %conv370
%idxprom372 = sext i32 %sub371 to i64
%arrayidx373 = getelementptr inbounds i32* %cond, i64 %idxprom372
- %19 = load i32* %arrayidx373, align 4, !tbaa !0
+ %19 = load i32* %arrayidx373, align 4
%add374 = add nsw i32 %add360, %19
%incdec.ptr376 = getelementptr inbounds i16* %refptr.11590, i64 7
- %20 = load i16* %incdec.ptr362, align 2, !tbaa !3
+ %20 = load i16* %incdec.ptr362, align 2
%conv377 = zext i16 %20 to i32
%mul378 = mul nsw i32 %conv377, %2
%add379 = add nsw i32 %mul378, %3
@@ -164,14 +164,14 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1491 = icmp slt i32 %cond.i.i1490, %1
%cond.i5.i1492 = select i1 %cmp.i4.i1491, i32 %cond.i.i1490, i32 %1
%incdec.ptr383 = getelementptr inbounds i16* %srcptr.41591, i64 7
- %21 = load i16* %incdec.ptr369, align 2, !tbaa !3
+ %21 = load i16* %incdec.ptr369, align 2
%conv384 = zext i16 %21 to i32
%sub385 = sub nsw i32 %cond.i5.i1492, %conv384
%idxprom386 = sext i32 %sub385 to i64
%arrayidx387 = getelementptr inbounds i32* %cond, i64 %idxprom386
- %22 = load i32* %arrayidx387, align 4, !tbaa !0
+ %22 = load i32* %arrayidx387, align 4
%add388 = add nsw i32 %add374, %22
- %23 = load i16* %incdec.ptr376, align 2, !tbaa !3
+ %23 = load i16* %incdec.ptr376, align 2
%conv391 = zext i16 %23 to i32
%mul392 = mul nsw i32 %conv391, %2
%add395 = add nsw i32 0, %5
@@ -180,25 +180,25 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1487 = icmp slt i32 %cond.i.i1486, %1
%cond.i5.i1488 = select i1 %cmp.i4.i1487, i32 %cond.i.i1486, i32 %1
%incdec.ptr397 = getelementptr inbounds i16* %srcptr.41591, i64 8
- %24 = load i16* %incdec.ptr383, align 2, !tbaa !3
+ %24 = load i16* %incdec.ptr383, align 2
%conv398 = zext i16 %24 to i32
%sub399 = sub nsw i32 %cond.i5.i1488, %conv398
%idxprom400 = sext i32 %sub399 to i64
%arrayidx401 = getelementptr inbounds i32* %cond, i64 %idxprom400
- %25 = load i32* %arrayidx401, align 4, !tbaa !0
+ %25 = load i32* %arrayidx401, align 4
%add402 = add nsw i32 %add388, %25
%incdec.ptr404 = getelementptr inbounds i16* %refptr.11590, i64 9
%cmp.i4.i1483 = icmp slt i32 undef, %1
%cond.i5.i1484 = select i1 %cmp.i4.i1483, i32 undef, i32 %1
- %26 = load i16* %incdec.ptr397, align 2, !tbaa !3
+ %26 = load i16* %incdec.ptr397, align 2
%conv412 = zext i16 %26 to i32
%sub413 = sub nsw i32 %cond.i5.i1484, %conv412
%idxprom414 = sext i32 %sub413 to i64
%arrayidx415 = getelementptr inbounds i32* %cond, i64 %idxprom414
- %27 = load i32* %arrayidx415, align 4, !tbaa !0
+ %27 = load i32* %arrayidx415, align 4
%add416 = add nsw i32 %27, %LineSadBlk2.01585
%incdec.ptr418 = getelementptr inbounds i16* %refptr.11590, i64 10
- %28 = load i16* %incdec.ptr404, align 2, !tbaa !3
+ %28 = load i16* %incdec.ptr404, align 2
%conv419 = zext i16 %28 to i32
%mul420 = mul nsw i32 %conv419, %2
%add421 = add nsw i32 %mul420, %3
@@ -212,10 +212,10 @@ for.body293: ; preds = %for.body293, %for.c
%sub427 = sub nsw i32 %cond.i5.i1480, 0
%idxprom428 = sext i32 %sub427 to i64
%arrayidx429 = getelementptr inbounds i32* %cond, i64 %idxprom428
- %29 = load i32* %arrayidx429, align 4, !tbaa !0
+ %29 = load i32* %arrayidx429, align 4
%add430 = add nsw i32 %add416, %29
%incdec.ptr432 = getelementptr inbounds i16* %refptr.11590, i64 11
- %30 = load i16* %incdec.ptr418, align 2, !tbaa !3
+ %30 = load i16* %incdec.ptr418, align 2
%conv433 = zext i16 %30 to i32
%mul434 = mul nsw i32 %conv433, %2
%add435 = add nsw i32 %mul434, %3
@@ -225,15 +225,15 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1474 = select i1 %cmp.i.i1473, i32 %add437, i32 0
%cmp.i4.i1475 = icmp slt i32 %cond.i.i1474, %1
%cond.i5.i1476 = select i1 %cmp.i4.i1475, i32 %cond.i.i1474, i32 %1
- %31 = load i16* %incdec.ptr425, align 2, !tbaa !3
+ %31 = load i16* %incdec.ptr425, align 2
%conv440 = zext i16 %31 to i32
%sub441 = sub nsw i32 %cond.i5.i1476, %conv440
%idxprom442 = sext i32 %sub441 to i64
%arrayidx443 = getelementptr inbounds i32* %cond, i64 %idxprom442
- %32 = load i32* %arrayidx443, align 4, !tbaa !0
+ %32 = load i32* %arrayidx443, align 4
%add444 = add nsw i32 %add430, %32
%incdec.ptr446 = getelementptr inbounds i16* %refptr.11590, i64 12
- %33 = load i16* %incdec.ptr432, align 2, !tbaa !3
+ %33 = load i16* %incdec.ptr432, align 2
%conv447 = zext i16 %33 to i32
%mul448 = mul nsw i32 %conv447, %2
%add449 = add nsw i32 %mul448, %3
@@ -244,15 +244,15 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1471 = icmp slt i32 %cond.i.i1470, %1
%cond.i5.i1472 = select i1 %cmp.i4.i1471, i32 %cond.i.i1470, i32 %1
%incdec.ptr453 = getelementptr inbounds i16* %srcptr.41591, i64 12
- %34 = load i16* undef, align 2, !tbaa !3
+ %34 = load i16* undef, align 2
%conv454 = zext i16 %34 to i32
%sub455 = sub nsw i32 %cond.i5.i1472, %conv454
%idxprom456 = sext i32 %sub455 to i64
%arrayidx457 = getelementptr inbounds i32* %cond, i64 %idxprom456
- %35 = load i32* %arrayidx457, align 4, !tbaa !0
+ %35 = load i32* %arrayidx457, align 4
%add458 = add nsw i32 %add444, %35
%incdec.ptr460 = getelementptr inbounds i16* %refptr.11590, i64 13
- %36 = load i16* %incdec.ptr446, align 2, !tbaa !3
+ %36 = load i16* %incdec.ptr446, align 2
%conv461 = zext i16 %36 to i32
%mul462 = mul nsw i32 %conv461, %2
%add463 = add nsw i32 %mul462, %3
@@ -263,12 +263,12 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1467 = icmp slt i32 %cond.i.i1466, %1
%cond.i5.i1468 = select i1 %cmp.i4.i1467, i32 %cond.i.i1466, i32 %1
%incdec.ptr467 = getelementptr inbounds i16* %srcptr.41591, i64 13
- %37 = load i16* %incdec.ptr453, align 2, !tbaa !3
+ %37 = load i16* %incdec.ptr453, align 2
%conv468 = zext i16 %37 to i32
%sub469 = sub nsw i32 %cond.i5.i1468, %conv468
%idxprom470 = sext i32 %sub469 to i64
%arrayidx471 = getelementptr inbounds i32* %cond, i64 %idxprom470
- %38 = load i32* %arrayidx471, align 4, !tbaa !0
+ %38 = load i32* %arrayidx471, align 4
%add472 = add nsw i32 %38, %LineSadBlk3.01586
%incdec.ptr474 = getelementptr inbounds i16* %refptr.11590, i64 14
%add477 = add nsw i32 0, %3
@@ -279,15 +279,15 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1463 = icmp slt i32 %cond.i.i1462, %1
%cond.i5.i1464 = select i1 %cmp.i4.i1463, i32 %cond.i.i1462, i32 %1
%incdec.ptr481 = getelementptr inbounds i16* %srcptr.41591, i64 14
- %39 = load i16* %incdec.ptr467, align 2, !tbaa !3
+ %39 = load i16* %incdec.ptr467, align 2
%conv482 = zext i16 %39 to i32
%sub483 = sub nsw i32 %cond.i5.i1464, %conv482
%idxprom484 = sext i32 %sub483 to i64
%arrayidx485 = getelementptr inbounds i32* %cond, i64 %idxprom484
- %40 = load i32* %arrayidx485, align 4, !tbaa !0
+ %40 = load i32* %arrayidx485, align 4
%add486 = add nsw i32 %add472, %40
%incdec.ptr488 = getelementptr inbounds i16* %refptr.11590, i64 15
- %41 = load i16* %incdec.ptr474, align 2, !tbaa !3
+ %41 = load i16* %incdec.ptr474, align 2
%conv489 = zext i16 %41 to i32
%mul490 = mul nsw i32 %conv489, %2
%add491 = add nsw i32 %mul490, %3
@@ -298,14 +298,14 @@ for.body293: ; preds = %for.body293, %for.c
%cmp.i4.i1459 = icmp slt i32 %cond.i.i1458, %1
%cond.i5.i1460 = select i1 %cmp.i4.i1459, i32 %cond.i.i1458, i32 %1
%incdec.ptr495 = getelementptr inbounds i16* %srcptr.41591, i64 15
- %42 = load i16* %incdec.ptr481, align 2, !tbaa !3
+ %42 = load i16* %incdec.ptr481, align 2
%conv496 = zext i16 %42 to i32
%sub497 = sub nsw i32 %cond.i5.i1460, %conv496
%idxprom498 = sext i32 %sub497 to i64
%arrayidx499 = getelementptr inbounds i32* %cond, i64 %idxprom498
- %43 = load i32* %arrayidx499, align 4, !tbaa !0
+ %43 = load i32* %arrayidx499, align 4
%add500 = add nsw i32 %add486, %43
- %44 = load i16* %incdec.ptr488, align 2, !tbaa !3
+ %44 = load i16* %incdec.ptr488, align 2
%conv503 = zext i16 %44 to i32
%mul504 = mul nsw i32 %conv503, %2
%add505 = add nsw i32 %mul504, %3
@@ -315,22 +315,22 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1454 = select i1 %cmp.i.i1453, i32 %add507, i32 0
%cmp.i4.i1455 = icmp slt i32 %cond.i.i1454, %1
%cond.i5.i1456 = select i1 %cmp.i4.i1455, i32 %cond.i.i1454, i32 %1
- %45 = load i16* %incdec.ptr495, align 2, !tbaa !3
+ %45 = load i16* %incdec.ptr495, align 2
%conv510 = zext i16 %45 to i32
%sub511 = sub nsw i32 %cond.i5.i1456, %conv510
%idxprom512 = sext i32 %sub511 to i64
%arrayidx513 = getelementptr inbounds i32* %cond, i64 %idxprom512
- %46 = load i32* %arrayidx513, align 4, !tbaa !0
+ %46 = load i32* %arrayidx513, align 4
%add514 = add nsw i32 %add500, %46
%add.ptr517 = getelementptr inbounds i16* %refptr.11590, i64 %incdec.ptr502.sum
%exitcond1692 = icmp eq i32 undef, 4
br i1 %exitcond1692, label %for.end520, label %for.body293
for.end520: ; preds = %for.body293
- store i32 %add346, i32* undef, align 4, !tbaa !0
- store i32 %add402, i32* undef, align 4, !tbaa !0
- store i32 %add458, i32* undef, align 4, !tbaa !0
- store i32 %add514, i32* null, align 4, !tbaa !0
+ store i32 %add346, i32* undef, align 4
+ store i32 %add402, i32* undef, align 4
+ store i32 %add458, i32* undef, align 4
+ store i32 %add514, i32* null, align 4
br i1 undef, label %for.end543, label %for.cond290.preheader
for.end543: ; preds = %for.end520
@@ -402,8 +402,3 @@ for.end999: ; preds = %for.inc997
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
index 523f329303..c2cf98174f 100644
--- a/test/CodeGen/PowerPC/optcmp.ll
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
entry:
%sub = sub nsw i32 %a, %b
- store i32 %sub, i32* %c, align 4, !tbaa !0
+ store i32 %sub, i32* %c, align 4
%cmp = icmp sgt i32 %a, %b
%cond = select i1 %cmp, i32 %a, i32 %b
ret i32 %cond
@@ -17,7 +17,7 @@ entry:
define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
entry:
%shl = shl i32 %a, %b
- store i32 %shl, i32* %c, align 4, !tbaa !0
+ store i32 %shl, i32* %c, align 4
%cmp = icmp sgt i32 %shl, 0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -29,7 +29,7 @@ entry:
define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8, !tbaa !3
+ store i64 %sub, i64* %c, align 8
%cmp = icmp sgt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
@@ -43,7 +43,7 @@ entry:
define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8, !tbaa !3
+ store i64 %sub, i64* %c, align 8
%cmp = icmp sle i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
@@ -57,7 +57,7 @@ entry:
define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%sub = sub nsw i64 %b, %a
- store i64 %sub, i64* %c, align 8, !tbaa !3
+ store i64 %sub, i64* %c, align 8
%cmp = icmp sgt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
@@ -71,7 +71,7 @@ entry:
define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%sub = sub nsw i64 %b, %a
- store i64 %sub, i64* %c, align 8, !tbaa !3
+ store i64 %sub, i64* %c, align 8
%cmp = icmp eq i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
@@ -85,7 +85,7 @@ entry:
define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8, !tbaa !3
+ store i64 %sub, i64* %c, align 8
%cmp = icmp eq i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
@@ -99,7 +99,7 @@ entry:
define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%shl = shl i64 %a, %b
- store i64 %shl, i64* %c, align 8, !tbaa !3
+ store i64 %shl, i64* %c, align 8
%cmp = icmp sgt i64 %shl, 0
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
@@ -112,7 +112,7 @@ entry:
define double @food(double %a, double %b, double* nocapture %c) #0 {
entry:
%sub = fsub double %a, %b
- store double %sub, double* %c, align 8, !tbaa !3
+ store double %sub, double* %c, align 8
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
@@ -125,7 +125,7 @@ entry:
define float @foof(float %a, float %b, float* nocapture %c) #0 {
entry:
%sub = fsub float %a, %b
- store float %sub, float* %c, align 4, !tbaa !3
+ store float %sub, float* %c, align 4
%cmp = fcmp ogt float %a, %b
%cond = select i1 %cmp, float %a, float %b
ret float %cond
@@ -134,10 +134,3 @@ entry:
; CHECK-NOT: fsubs. 0, 1, 2
; CHECK: stfs 0, 0(5)
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"long", metadata !1}
-!4 = metadata !{metadata !"any pointer", metadata !1}
-
diff --git a/test/CodeGen/PowerPC/pr15031.ll b/test/CodeGen/PowerPC/pr15031.ll
index 5ccf941a1f..e58ad80e13 100644
--- a/test/CodeGen/PowerPC/pr15031.ll
+++ b/test/CodeGen/PowerPC/pr15031.ll
@@ -317,54 +317,42 @@ if.then: ; preds = %entry
if.end: ; preds = %entry, %if.then
%Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
%RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
- %1 = load i32* %RegNo.i.i, align 4, !tbaa !0
+ %1 = load i32* %RegNo.i.i, align 4
%cmp.i = icmp eq i32 %1, %Reg.addr.0
br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
if.end.i: ; preds = %if.end
%ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 3
- %2 = load %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8, !tbaa !3
+ %2 = load %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
%tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
br i1 %tobool.i, label %if.end13.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
%Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr"* %2, i64 0, i32 2
- %3 = load %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8, !tbaa !3
+ %3 = load %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
%tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
if.then6.i: ; preds = %if.then3.i
%xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
- %4 = load %"class.llvm::MachineFunction"** %xParent.i.i, align 8, !tbaa !3
+ %4 = load %"class.llvm::MachineFunction"** %xParent.i.i, align 8
%tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
if.then9.i: ; preds = %if.then6.i
%RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction"* %4, i64 0, i32 5
- %5 = load %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8, !tbaa !3
+ %5 = load %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
- store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4, !tbaa !0
+ store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
br label %_ZN4llvm14MachineOperand6setRegEj.exit
if.end13.i: ; preds = %if.then6.i, %if.then3.i, %if.end.i
- store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4, !tbaa !0
+ store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
br label %_ZN4llvm14MachineOperand6setRegEj.exit
_ZN4llvm14MachineOperand6setRegEj.exit: ; preds = %if.end, %if.then9.i, %if.end13.i
ret void
}
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
-!4 = metadata !{metadata !"vtable pointer", metadata !2}
-!5 = metadata !{metadata !"long", metadata !1}
-!6 = metadata !{i64 0, i64 8, metadata !3, i64 8, i64 8, metadata !5}
-!7 = metadata !{metadata !"short", metadata !1}
-!8 = metadata !{i64 0, i64 1, metadata !1, i64 1, i64 4, metadata !0, i64 2, i64 1, metadata !1, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 3, i64 1, metadata !9, i64 4, i64 4, metadata !0, i64 4, i64 4, metadata !0, i64 8, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !5, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 24, i64 8, metadata !3, i64 16, i64 4, metadata !0, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 16, i64 8, metadata !3, i64 24, i64 4, metadata !0}
-!9 = metadata !{metadata !"bool", metadata !1}
-!10 = metadata !{i8 0, i8 2}
-
; CHECK-NOT: lbzu 3, 1(3)
diff --git a/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll b/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
index 0f18f0969b..d6aa2e3863 100644
--- a/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
+++ b/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
@@ -41,13 +41,8 @@ cond.false156.i: ; preds = %for.end.i, %land.en
cond.end166.i: ; preds = %cond.false156.i, %cond.true138.i
%idxprom1113.i = phi i64 [ %idxprom1114.i, %cond.false156.i ], [ undef, %cond.true138.i ]
- %tmp235.i = load %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8, !tbaa !0
+ %tmp235.i = load %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
%att.i = getelementptr inbounds %struct.state* %tmp235.i, i64 %idxprom1113.i, i32 0
- store i32 0, i32* %att.i, align 4, !tbaa !3
+ store i32 0, i32* %att.i, align 4
ret void
}
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/CodeGen/X86/2011-10-12-MachineCSE.ll b/test/CodeGen/X86/2011-10-12-MachineCSE.ll
index cd15f84605..72e672ac4f 100644
--- a/test/CodeGen/X86/2011-10-12-MachineCSE.ll
+++ b/test/CodeGen/X86/2011-10-12-MachineCSE.ll
@@ -20,11 +20,11 @@ entry:
%2 = lshr i32 %1, 16
%bf.clear = and i32 %2, 255
%idxprom = sext i32 %bf.clear to i64
- %3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8, !tbaa !0
+ %3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
%handlers = getelementptr inbounds %struct.optab* %3, i32 0, i32 1
%arrayidx = getelementptr inbounds [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
%insn_code = getelementptr inbounds %struct.anon.3* %arrayidx, i32 0, i32 0
- %4 = load i32* %insn_code, align 4, !tbaa !3
+ %4 = load i32* %insn_code, align 4
%cmp = icmp eq i32 %4, 1317
br i1 %cmp, label %if.then, label %lor.lhs.false
@@ -32,14 +32,14 @@ lor.lhs.false: ; preds = %entry
%idxprom1 = sext i32 %4 to i64
%arrayidx2 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
%operand = getelementptr inbounds %struct.insn_data* %arrayidx2, i32 0, i32 3
- %5 = load %struct.insn_operand_data** %operand, align 8, !tbaa !0
+ %5 = load %struct.insn_operand_data** %operand, align 8
%arrayidx3 = getelementptr inbounds %struct.insn_operand_data* %5, i64 0
%predicate = getelementptr inbounds %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
- %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8, !tbaa !0
+ %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8
%idxprom4 = sext i32 %4 to i64
%arrayidx5 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
%operand6 = getelementptr inbounds %struct.insn_data* %arrayidx5, i32 0, i32 3
- %7 = load %struct.insn_operand_data** %operand6, align 8, !tbaa !0
+ %7 = load %struct.insn_operand_data** %operand6, align 8
%arrayidx7 = getelementptr inbounds %struct.insn_operand_data* %7, i64 0
%8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8*
%bf.field.offs = getelementptr i8* %8, i32 16
@@ -54,14 +54,14 @@ lor.lhs.false9: ; preds = %lor.lhs.false
%idxprom10 = sext i32 %4 to i64
%arrayidx11 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
%operand12 = getelementptr inbounds %struct.insn_data* %arrayidx11, i32 0, i32 3
- %11 = load %struct.insn_operand_data** %operand12, align 8, !tbaa !0
+ %11 = load %struct.insn_operand_data** %operand12, align 8
%arrayidx13 = getelementptr inbounds %struct.insn_operand_data* %11, i64 1
%predicate14 = getelementptr inbounds %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
- %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8, !tbaa !0
+ %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8
%idxprom15 = sext i32 %4 to i64
%arrayidx16 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
%operand17 = getelementptr inbounds %struct.insn_data* %arrayidx16, i32 0, i32 3
- %13 = load %struct.insn_operand_data** %operand17, align 8, !tbaa !0
+ %13 = load %struct.insn_operand_data** %operand17, align 8
%arrayidx18 = getelementptr inbounds %struct.insn_operand_data* %13, i64 1
%14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8*
%bf.field.offs19 = getelementptr i8* %14, i32 16
@@ -76,14 +76,14 @@ lor.lhs.false23: ; preds = %lor.lhs.false9
%idxprom24 = sext i32 %4 to i64
%arrayidx25 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
%operand26 = getelementptr inbounds %struct.insn_data* %arrayidx25, i32 0, i32 3
- %17 = load %struct.insn_operand_data** %operand26, align 8, !tbaa !0
+ %17 = load %struct.insn_operand_data** %operand26, align 8
%arrayidx27 = getelementptr inbounds %struct.insn_operand_data* %17, i64 2
%predicate28 = getelementptr inbounds %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
- %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8, !tbaa !0
+ %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8
%idxprom29 = sext i32 %4 to i64
%arrayidx30 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
%operand31 = getelementptr inbounds %struct.insn_data* %arrayidx30, i32 0, i32 3
- %19 = load %struct.insn_operand_data** %operand31, align 8, !tbaa !0
+ %19 = load %struct.insn_operand_data** %operand31, align 8
%arrayidx32 = getelementptr inbounds %struct.insn_operand_data* %19, i64 2
%20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8*
%bf.field.offs33 = getelementptr i8* %20, i32 16
@@ -101,7 +101,7 @@ if.end: ; preds = %lor.lhs.false23
%idxprom37 = sext i32 %4 to i64
%arrayidx38 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
%genfun = getelementptr inbounds %struct.insn_data* %arrayidx38, i32 0, i32 2
- %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8, !tbaa !0
+ %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
%call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c)
br label %return
@@ -109,8 +109,3 @@ return: ; preds = %if.end, %if.then
%24 = phi %struct.rtx_def* [ %call39, %if.end ], [ null, %if.then ]
ret %struct.rtx_def* %24
}
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"_ZTS9insn_code", metadata !1}
diff --git a/test/CodeGen/X86/coalesce-implicitdef.ll b/test/CodeGen/X86/coalesce-implicitdef.ll
index 19cd08cf37..9be045271d 100644
--- a/test/CodeGen/X86/coalesce-implicitdef.ll
+++ b/test/CodeGen/X86/coalesce-implicitdef.ll
@@ -26,7 +26,7 @@ for.cond: ; preds = %for.inc34, %entry
br i1 %tobool, label %for.end36, label %for.body
for.body: ; preds = %for.cond
- store i32 0, i32* @c, align 4, !tbaa !0
+ store i32 0, i32* @c, align 4
br label %for.body2
for.body2: ; preds = %for.body, %for.inc
@@ -35,7 +35,7 @@ for.body2: ; preds = %for.body, %for.inc
br i1 %tobool3, label %if.then10, label %if.then
if.then: ; preds = %for.body2
- store i32 0, i32* %i, align 4, !tbaa !0
+ store i32 0, i32* %i, align 4
br label %for.body6
for.body6: ; preds = %if.then, %for.body6
@@ -43,7 +43,7 @@ for.body6: ; preds = %if.then, %for.body6
br i1 true, label %for.body6, label %for.inc
if.then10: ; preds = %for.body2
- store i32 1, i32* @b, align 4, !tbaa !0
+ store i32 1, i32* @b, align 4
ret void
for.inc: ; preds = %for.body6
@@ -66,30 +66,30 @@ while.end: ; preds = %while.cond
for.inc27.backedge: ; preds = %while.end, %if.then22
%inc28 = add nsw i32 %0, 1
- store i32 %inc28, i32* @b, align 4, !tbaa !0
+ store i32 %inc28, i32* @b, align 4
%tobool17 = icmp eq i32 %inc28, 0
br i1 %tobool17, label %for.inc27.if.end30.loopexit56_crit_edge, label %while.condthread-pre-split
if.then22: ; preds = %while.end
- %1 = load i16* %p2.1, align 2, !tbaa !3
+ %1 = load i16* %p2.1, align 2
%tobool23 = icmp eq i16 %1, 0
br i1 %tobool23, label %for.inc27.backedge, label %label.loopexit
label.loopexit: ; preds = %if.then22
- store i32 %inc20, i32* @a, align 4, !tbaa !0
+ store i32 %inc20, i32* @a, align 4
%inc2858 = add nsw i32 %0, 1
- store i32 %inc2858, i32* @b, align 4, !tbaa !0
+ store i32 %inc2858, i32* @b, align 4
%tobool1759 = icmp eq i32 %inc2858, 0
br i1 %tobool1759, label %if.end30, label %while.condthread-pre-split
for.inc27.if.end30.loopexit56_crit_edge: ; preds = %for.inc27.backedge
- store i32 %inc20, i32* @a, align 4, !tbaa !0
+ store i32 %inc20, i32* @a, align 4
br label %if.end30
if.end30: ; preds = %for.inc27.if.end30.loopexit56_crit_edge, %label.loopexit, %label.preheader, %for.inc
%i.0.load46 = phi i32 [ 0, %for.inc ], [ %i.0.load4669, %label.preheader ], [ %i.0.load4669, %label.loopexit ], [ %i.0.load4669, %for.inc27.if.end30.loopexit56_crit_edge ]
%pi.4 = phi i32* [ %i, %for.inc ], [ %pi.3.ph, %label.preheader ], [ %pi.3.ph, %label.loopexit ], [ %pi.3.ph, %for.inc27.if.end30.loopexit56_crit_edge ]
- %2 = load i32* %pi.4, align 4, !tbaa !0
+ %2 = load i32* %pi.4, align 4
%tobool31 = icmp eq i32 %2, 0
br i1 %tobool31, label %for.inc34, label %label.preheader
@@ -100,31 +100,26 @@ for.inc34: ; preds = %if.end30
for.end36: ; preds = %for.cond
store i32 1, i32* %i, align 4
- %3 = load i32* @c, align 4, !tbaa !0
+ %3 = load i32* @c, align 4
%tobool37 = icmp eq i32 %3, 0
br i1 %tobool37, label %label.preheader, label %land.rhs
land.rhs: ; preds = %for.end36
- store i32 0, i32* @a, align 4, !tbaa !0
+ store i32 0, i32* @a, align 4
br label %label.preheader
label.preheader: ; preds = %for.end36, %if.end30, %land.rhs
%i.0.load4669 = phi i32 [ 1, %land.rhs ], [ %i.0.load46, %if.end30 ], [ 1, %for.end36 ]
%pi.3.ph = phi i32* [ %pi.0, %land.rhs ], [ %pi.4, %if.end30 ], [ %pi.0, %for.end36 ]
- %4 = load i32* @b, align 4, !tbaa !0
+ %4 = load i32* @b, align 4
%inc285863 = add nsw i32 %4, 1
- store i32 %inc285863, i32* @b, align 4, !tbaa !0
+ store i32 %inc285863, i32* @b, align 4
%tobool175964 = icmp eq i32 %inc285863, 0
br i1 %tobool175964, label %if.end30, label %while.condthread-pre-split.lr.ph.lr.ph
while.condthread-pre-split.lr.ph.lr.ph: ; preds = %label.preheader
- %.pr50 = load i32* @d, align 4, !tbaa !0
+ %.pr50 = load i32* @d, align 4
%tobool19 = icmp eq i32 %.pr50, 0
- %a.promoted.pre = load i32* @a, align 4, !tbaa !0
+ %a.promoted.pre = load i32* @a, align 4
br label %while.condthread-pre-split
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/X86/pr14090.ll b/test/CodeGen/X86/pr14090.ll
index d76b912fd8..2f7c720386 100644
--- a/test/CodeGen/X86/pr14090.ll
+++ b/test/CodeGen/X86/pr14090.ll
@@ -48,11 +48,11 @@ entry:
%fifteen = bitcast i64* %retval.i.i to i32**
%sixteen = bitcast i64* %retval.i.i to i8*
call void @llvm.lifetime.start(i64 8, i8* %sixteen)
- store i32* %.ph.i80, i32** %fifteen, align 8, !tbaa !0
+ store i32* %.ph.i80, i32** %fifteen, align 8
%sunkaddr = ptrtoint i64* %retval.i.i to i32
%sunkaddr86 = add i32 %sunkaddr, 4
%sunkaddr87 = inttoptr i32 %sunkaddr86 to i32*
- store i32 %fourteen, i32* %sunkaddr87, align 4, !tbaa !3
+ store i32 %fourteen, i32* %sunkaddr87, align 4
%seventeen = load i64* %retval.i.i, align 8
call void @llvm.lifetime.end(i64 8, i8* %sixteen)
%eighteen = lshr i64 %seventeen, 32
@@ -68,9 +68,3 @@ entry:
declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
-!4 = metadata !{metadata !"vtable pointer", metadata !2}