-rw-r--r--  docs/LangRef.html                                 79
-rw-r--r--  include/llvm/AutoUpgrade.h                        38
-rw-r--r--  include/llvm/CodeGen/ValueTypes.h                 10
-rw-r--r--  include/llvm/CodeGen/ValueTypes.td                 2
-rw-r--r--  include/llvm/Intrinsics.td                        79
-rw-r--r--  lib/Analysis/ConstantFolding.cpp                   6
-rw-r--r--  lib/AsmParser/llvmAsmParser.y                      6
-rw-r--r--  lib/Bitcode/Reader/BitcodeReader.cpp              39
-rw-r--r--  lib/Bitcode/Reader/BitcodeReader.h                 5
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp     12
-rw-r--r--  lib/Target/X86/X86TargetAsmInfo.cpp                4
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp     4
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp                        197
-rw-r--r--  lib/VMCore/Verifier.cpp                           133
-rw-r--r--  test/Assembler/AutoUpgradeIntrinsics.ll            52
-rw-r--r--  test/Bitcode/AutoUpgradeIntrinsics.ll              10
-rw-r--r--  test/Bitcode/AutoUpgradeIntrinsics.ll.bc          bin 0 -> 800 bytes
-rw-r--r--  test/CodeGen/Alpha/ctlz.ll                         7
-rw-r--r--  test/CodeGen/Generic/bit-intrinsics.ll            16
-rw-r--r--  test/CodeGen/PowerPC/2007-03-24-cntlzd.ll          5
-rw-r--r--  test/Feature/llvm2cpp.ll                          72
-rw-r--r--  test/Transforms/InstCombine/bitcount.ll           14
-rw-r--r--  tools/llvm-upgrade/UpgradeParser.y                28
-rw-r--r--  utils/TableGen/CodeGenIntrinsics.h                 4
-rw-r--r--  utils/TableGen/CodeGenTarget.cpp                   5
-rw-r--r--  utils/TableGen/IntrinsicEmitter.cpp               101
-rw-r--r--  utils/TableGen/RegisterInfoEmitter.cpp             2
27 files changed, 607 insertions, 323 deletions
diff --git a/docs/LangRef.html b/docs/LangRef.html
index 79d6f8820c..35990bdcdd 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -3714,17 +3714,27 @@ of an intrinsic function. Additionally, because intrinsic functions are part
of the LLVM language, it is required if any are added that they be documented
here.</p>
-<p>Some intrinsic functions can be overloaded, i.e., the intrinsic represents
-a family of functions that perform the same operation but on different data
-types. This is most frequent with the integer types. Since LLVM can represent
-over 8 million different integer types, there is a way to declare an intrinsic
-that can be overloaded based on its arguments. Such an intrinsic will have the
-names of its argument types encoded into its function name, each
-preceded by a period. For example, the <tt>llvm.ctpop</tt> function can take an
-integer of any width. This leads to a family of functions such as
-<tt>i32 @llvm.ctpop.i8(i8 %val)</tt> and <tt>i32 @llvm.ctpop.i29(i29 %val)</tt>.
-</p>
-
+<p>Some intrinsic functions can be overloaded, i.e., the intrinsic represents
+a family of functions that perform the same operation but on different data
+types. Because LLVM can represent over 8 million different integer types,
+overloading is used commonly to allow an intrinsic function to operate on any
+integer type. One or more of the argument types or the result type can be
+overloaded to accept any integer type. Argument types may also be defined as
+exactly matching a previous argument's type or the result type. This allows an
+intrinsic function which accepts multiple arguments, but needs all of them to
+be of the same type, to only be overloaded with respect to a single argument or
+the result.</p>
+
+<p>Overloaded intrinsics will have the names of their overloaded argument types
+encoded into their function names, each preceded by a period. Only those types
+which are overloaded result in a name suffix. Arguments whose type is matched
+against another type do not. For example, the <tt>llvm.ctpop</tt> function can
+take an integer of any width and return an integer of exactly the same
+width. This leads to a family of functions such as
+<tt>i8 @llvm.ctpop.i8(i8 %val)</tt> and <tt>i29 @llvm.ctpop.i29(i29 %val)</tt>.
+Only one type, the return type, is overloaded, and only one type suffix is
+required. Because the argument's type is matched against the return type, it
+does not require its own name suffix.</p>
<p>To learn how to add an intrinsic function, please see the
<a href="ExtendingLLVM.html">Extending LLVM Guide</a>.
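As an illustration of the naming scheme described above, a minimal C++ sketch (not part of this patch; M is an assumed Module*) of how a pass would request one member of such a family: only the single overloaded type is supplied, and the ".i29" suffix follows from it.

  // Hypothetical use of the overloaded llvm.ctpop; the declaration's name
  // becomes "llvm.ctpop.i29" because exactly one type is overloaded.
  const Type *Tys[] = { IntegerType::get(29) };
  Function *Ctpop = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys, 1);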
@@ -4558,12 +4568,11 @@ These allow efficient code generation for some algorithms.
<h5>Syntax:</h5>
<p>This is an overloaded intrinsic function. You can use bswap on any integer
-type that is an even number of bytes (i.e. BitWidth % 16 == 0). Note the suffix
-that includes the type for the result and the operand.
+type that is an even number of bytes (i.e. BitWidth % 16 == 0).
<pre>
- declare i16 @llvm.bswap.i16.i16(i16 &lt;id&gt;)
- declare i32 @llvm.bswap.i32.i32(i32 &lt;id&gt;)
- declare i64 @llvm.bswap.i64.i64(i64 &lt;id&gt;)
+ declare i16 @llvm.bswap.i16(i16 &lt;id&gt;)
+ declare i32 @llvm.bswap.i32(i32 &lt;id&gt;)
+ declare i64 @llvm.bswap.i64(i64 &lt;id&gt;)
</pre>
<h5>Overview:</h5>
@@ -4578,12 +4587,12 @@ byte order.
<h5>Semantics:</h5>
<p>
-The <tt>llvm.bswap.16.i16</tt> intrinsic returns an i16 value that has the high
+The <tt>llvm.bswap.i16</tt> intrinsic returns an i16 value that has the high
and low byte of the input i16 swapped. Similarly, the <tt>llvm.bswap.i32</tt>
intrinsic returns an i32 value that has the four bytes of the input i32
swapped, so that if the input bytes are numbered 0, 1, 2, 3 then the returned
-i32 will have its bytes in 3, 2, 1, 0 order. The <tt>llvm.bswap.i48.i48</tt>,
-<tt>llvm.bswap.i64.i64</tt> and other intrinsics extend this concept to
+i32 will have its bytes in 3, 2, 1, 0 order. The <tt>llvm.bswap.i48</tt>,
+<tt>llvm.bswap.i64</tt> and other intrinsics extend this concept to
additional even-byte lengths (6 bytes, 8 bytes and more, respectively).
</p>
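As a worked example of these semantics, a small sketch using the same APInt facility the constant folder relies on (see lib/Analysis/ConstantFolding.cpp below):

  APInt X(32, 0x12345678);
  APInt Swapped = X.byteSwap();   // 0x78563412: bytes reordered 3, 2, 1, 0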
@@ -4600,11 +4609,11 @@ additional even-byte lengths (6 bytes, 8 bytes and more, respectively).
<p>This is an overloaded intrinsic. You can use llvm.ctpop on any integer bit
width. Not all targets support all bit widths however.
<pre>
- declare i32 @llvm.ctpop.i8 (i8 &lt;src&gt;)
- declare i32 @llvm.ctpop.i16(i16 &lt;src&gt;)
+ declare i8 @llvm.ctpop.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.ctpop.i16(i16 &lt;src&gt;)
declare i32 @llvm.ctpop.i32(i32 &lt;src&gt;)
- declare i32 @llvm.ctpop.i64(i64 &lt;src&gt;)
- declare i32 @llvm.ctpop.i256(i256 &lt;src&gt;)
+ declare i64 @llvm.ctpop.i64(i64 &lt;src&gt;)
+ declare i256 @llvm.ctpop.i256(i256 &lt;src&gt;)
</pre>
<h5>Overview:</h5>
@@ -4639,11 +4648,11 @@ The '<tt>llvm.ctpop</tt>' intrinsic counts the 1's in a variable.
<p>This is an overloaded intrinsic. You can use <tt>llvm.ctlz</tt> on any
integer bit width. Not all targets support all bit widths however.
<pre>
- declare i32 @llvm.ctlz.i8 (i8 &lt;src&gt;)
- declare i32 @llvm.ctlz.i16(i16 &lt;src&gt;)
+ declare i8 @llvm.ctlz.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.ctlz.i16(i16 &lt;src&gt;)
declare i32 @llvm.ctlz.i32(i32 &lt;src&gt;)
- declare i32 @llvm.ctlz.i64(i64 &lt;src&gt;)
- declare i32 @llvm.ctlz.i256(i256 &lt;src&gt;)
+ declare i64 @llvm.ctlz.i64(i64 &lt;src&gt;)
+ declare i256 @llvm.ctlz.i256(i256 &lt;src&gt;)
</pre>
<h5>Overview:</h5>
@@ -4682,11 +4691,11 @@ of src. For example, <tt>llvm.ctlz(i32 2) = 30</tt>.
<p>This is an overloaded intrinsic. You can use <tt>llvm.cttz</tt> on any
integer bit width. Not all targets support all bit widths however.
<pre>
- declare i32 @llvm.cttz.i8 (i8 &lt;src&gt;)
- declare i32 @llvm.cttz.i16(i16 &lt;src&gt;)
+ declare i8 @llvm.cttz.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.cttz.i16(i16 &lt;src&gt;)
declare i32 @llvm.cttz.i32(i32 &lt;src&gt;)
- declare i32 @llvm.cttz.i64(i64 &lt;src&gt;)
- declare i32 @llvm.cttz.i256(i256 &lt;src&gt;)
+ declare i64 @llvm.cttz.i64(i64 &lt;src&gt;)
+ declare i256 @llvm.cttz.i256(i256 &lt;src&gt;)
</pre>
<h5>Overview:</h5>
@@ -4723,8 +4732,8 @@ of src. For example, <tt>llvm.cttz(2) = 1</tt>.
<p>This is an overloaded intrinsic. You can use <tt>llvm.part.select</tt>
on any integer bit width.
<pre>
- declare i17 @llvm.part.select.i17.i17 (i17 %val, i32 %loBit, i32 %hiBit)
- declare i29 @llvm.part.select.i29.i29 (i29 %val, i32 %loBit, i32 %hiBit)
+ declare i17 @llvm.part.select.i17 (i17 %val, i32 %loBit, i32 %hiBit)
+ declare i29 @llvm.part.select.i29 (i29 %val, i32 %loBit, i32 %hiBit)
</pre>
<h5>Overview:</h5>
@@ -4770,8 +4779,8 @@ returned in the reverse order. So, for example, if <tt>X</tt> has the value
<p>This is an overloaded intrinsic. You can use <tt>llvm.part.set</tt>
on any integer bit width.
<pre>
- declare i17 @llvm.part.set.i17.i17.i9 (i17 %val, i9 %repl, i32 %lo, i32 %hi)
- declare i29 @llvm.part.set.i29.i29.i9 (i29 %val, i9 %repl, i32 %lo, i32 %hi)
+ declare i17 @llvm.part.set.i17.i9 (i17 %val, i9 %repl, i32 %lo, i32 %hi)
+ declare i29 @llvm.part.set.i29.i9 (i29 %val, i9 %repl, i32 %lo, i32 %hi)
</pre>
<h5>Overview:</h5>
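When more than one type is overloaded, as in llvm.part.set above, one type is supplied per overloaded position and each contributes its own suffix. A hypothetical C++ sketch (M is an assumed Module*):

  // Two overloaded types -> two entries in Tys and two name suffixes,
  // giving "llvm.part.set.i17.i9".
  const Type *Tys[] = { IntegerType::get(17), IntegerType::get(9) };
  Function *PartSet = Intrinsic::getDeclaration(M, Intrinsic::part_set, Tys, 2);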
diff --git a/include/llvm/AutoUpgrade.h b/include/llvm/AutoUpgrade.h
new file mode 100644
index 0000000000..e3a32b93c9
--- /dev/null
+++ b/include/llvm/AutoUpgrade.h
@@ -0,0 +1,38 @@
+//===-- llvm/AutoUpgrade.h - AutoUpgrade Helpers ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by Chandler Carruth and is distributed under the
+// University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These functions are implemented by lib/VMCore/AutoUpgrade.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AUTOUPGRADE_H
+#define LLVM_AUTOUPGRADE_H
+
+namespace llvm {
+ class Function;
+ class CallInst;
+ class BasicBlock;
+
+ /// This is a more granular function that simply checks an intrinsic function
+ /// for upgrading, and if it requires upgrading provides the new function.
+ Function* UpgradeIntrinsicFunction(Function *F);
+
+ /// This is the complement to the above, replacing a specific call to an
+ /// intrinsic function with a call to the specified new function.
+ void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);
+
+ /// This is an auto-upgrade hook for any old intrinsic function syntaxes
+ /// which need to have both the function updated as well as all calls updated
+ /// to the new function. This should only be run in a post-processing fashion
+ /// so that it can update all calls to the old function.
+ void UpgradeCallsToIntrinsic(Function* F);
+
+} // End llvm namespace
+
+#endif
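A minimal usage sketch of these hooks, mirroring how the readers below drive them (boilerplate only, not part of the patch):

  #include "llvm/AutoUpgrade.h"
  #include "llvm/Module.h"
  using namespace llvm;

  static void upgradeModule(Module &M) {
    // Post-increment is required: UpgradeCallsToIntrinsic may erase the
    // current Function from the Module.
    for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; )
      UpgradeCallsToIntrinsic(FI++);
  }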
diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h
index 84d80606b5..4ddacba319 100644
--- a/include/llvm/CodeGen/ValueTypes.h
+++ b/include/llvm/CodeGen/ValueTypes.h
@@ -67,14 +67,14 @@ namespace MVT { // MVT = Machine Value Types
LAST_VALUETYPE = 27, // This always remains at the end of the list.
- // iAny - An integer value of any bit width. This is used for intrinsics
- // that have overloadings based on integer bit widths. This is only for
- // tblgen's consumption!
- iAny = 254,
+ // iAny - An integer or vector integer value of any bit width. This is
+ // used for intrinsics that have overloadings based on integer bit widths.
+ // This is only for tblgen's consumption!
+ iAny = 254,
// iPTR - An int value the size of the pointer of the current
// target. This should only be used internal to tblgen!
- iPTR = 255
+ iPTR = 255
};
/// MVT::ValueType - This type holds low-level value types. Valid values
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
index a133875d9d..47678d1de9 100644
--- a/include/llvm/CodeGen/ValueTypes.td
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -50,7 +50,7 @@ def v4f32 : ValueType<128, 25>; // 4 x f32 vector value
def v2f64 : ValueType<128, 26>; // 2 x f64 vector value
// Pseudo valuetype to represent "integer of any bit width"
-def iAny : ValueType<0 , 254>; // integer value of any bit width
+def iAny : ValueType<0 , 254>;
// Pseudo valuetype mapped to the current pointer size.
def iPTR : ValueType<0 , 255>;
diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td
index 205eac509a..91f12841f4 100644
--- a/include/llvm/Intrinsics.td
+++ b/include/llvm/Intrinsics.td
@@ -52,59 +52,48 @@ def IntrWriteMem : IntrinsicProperty;
// Types used by intrinsics.
//===----------------------------------------------------------------------===//
-class LLVMType<ValueType vt, string typeval> {
+class LLVMType<ValueType vt> {
ValueType VT = vt;
- string TypeVal = typeval;
}
-class LLVMIntegerType<ValueType VT, int width>
- : LLVMType<VT, "Type::IntegerTyID"> {
- int Width = width;
-}
-
-class LLVMVectorType<ValueType VT, int numelts, LLVMType elty>
- : LLVMType<VT, "Type::VectorTyID">{
- int NumElts = numelts;
- LLVMType ElTy = elty;
-}
-
class LLVMPointerType<LLVMType elty>
- : LLVMType<iPTR, "Type::PointerTyID">{
+ : LLVMType<iPTR>{
LLVMType ElTy = elty;
}
-class LLVMEmptyStructType
- : LLVMType<OtherVT, "Type::StructTyID">{
+class LLVMMatchType<int num>
+ : LLVMType<OtherVT>{
+ int Number = num;
}
-def llvm_void_ty : LLVMType<isVoid, "Type::VoidTyID">;
-def llvm_int_ty : LLVMIntegerType<iAny, 0>;
-def llvm_i1_ty : LLVMIntegerType<i1 , 1>;
-def llvm_i8_ty : LLVMIntegerType<i8 , 8>;
-def llvm_i16_ty : LLVMIntegerType<i16, 16>;
-def llvm_i32_ty : LLVMIntegerType<i32, 32>;
-def llvm_i64_ty : LLVMIntegerType<i64, 64>;
-def llvm_float_ty : LLVMType<f32, "Type::FloatTyID">;
-def llvm_double_ty : LLVMType<f64, "Type::DoubleTyID">;
+def llvm_void_ty : LLVMType<isVoid>;
+def llvm_anyint_ty : LLVMType<iAny>;
+def llvm_i1_ty : LLVMType<i1>;
+def llvm_i8_ty : LLVMType<i8>;
+def llvm_i16_ty : LLVMType<i16>;
+def llvm_i32_ty : LLVMType<i32>;
+def llvm_i64_ty : LLVMType<i64>;
+def llvm_float_ty : LLVMType<f32>;
+def llvm_double_ty : LLVMType<f64>;
def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8*
def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8**
-def llvm_empty_ty : LLVMEmptyStructType; // { }
+def llvm_empty_ty : LLVMType<OtherVT>; // { }
def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
-def llvm_v16i8_ty : LLVMVectorType<v16i8,16, llvm_i8_ty>; // 16 x i8
-def llvm_v8i16_ty : LLVMVectorType<v8i16, 8, llvm_i16_ty>; // 8 x i16
-def llvm_v2i64_ty : LLVMVectorType<v2i64, 2, llvm_i64_ty>; // 2 x i64
-def llvm_v2i32_ty : LLVMVectorType<v2i32, 2, llvm_i32_ty>; // 2 x i32
-def llvm_v1i64_ty : LLVMVectorType<v1i64, 1, llvm_i64_ty>; // 1 x i64
-def llvm_v4i32_ty : LLVMVectorType<v4i32, 4, llvm_i32_ty>; // 4 x i32
-def llvm_v4f32_ty : LLVMVectorType<v4f32, 4, llvm_float_ty>; // 4 x float
-def llvm_v2f64_ty : LLVMVectorType<v2f64, 2, llvm_double_ty>;// 2 x double
+def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
+def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16
+def llvm_v2i64_ty : LLVMType<v2i64>; // 2 x i64
+def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
+def llvm_v1i64_ty : LLVMType<v1i64>; // 1 x i64
+def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
+def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float
+def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double
// MMX Vector Types
-def llvm_v8i8_ty : LLVMVectorType<v8i8, 8, llvm_i8_ty>; // 8 x i8
-def llvm_v4i16_ty : LLVMVectorType<v4i16, 4, llvm_i16_ty>; // 4 x i16
+def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
+def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16
-def llvm_vararg_ty : LLVMType<isVoid, "...">; // vararg
+def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here
//===----------------------------------------------------------------------===//
// Intrinsic Definitions.
@@ -185,10 +174,10 @@ let Properties = [IntrWriteArgMem] in {
}
let Properties = [IntrNoMem] in {
- def int_sqrt_f32 : Intrinsic<[llvm_float_ty , llvm_float_ty]>;
+ def int_sqrt_f32 : Intrinsic<[llvm_float_ty, llvm_float_ty]>;
def int_sqrt_f64 : Intrinsic<[llvm_double_ty, llvm_double_ty]>;
- def int_powi_f32 : Intrinsic<[llvm_float_ty , llvm_float_ty, llvm_i32_ty]>;
+ def int_powi_f32 : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_i32_ty]>;
def int_powi_f64 : Intrinsic<[llvm_double_ty, llvm_double_ty, llvm_i32_ty]>;
}
@@ -203,14 +192,14 @@ def int_siglongjmp : Intrinsic<[llvm_void_ty, llvm_ptr_ty, llvm_i32_ty]>;
// None of these intrinsics accesses memory at all.
let Properties = [IntrNoMem] in {
- def int_bswap: Intrinsic<[llvm_int_ty, llvm_int_ty]>;
- def int_ctpop: Intrinsic<[llvm_i32_ty, llvm_int_ty]>;
- def int_ctlz : Intrinsic<[llvm_i32_ty, llvm_int_ty]>;
- def int_cttz : Intrinsic<[llvm_i32_ty, llvm_int_ty]>;
+ def int_bswap: Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>]>;
+ def int_ctpop: Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>]>;
+ def int_ctlz : Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>]>;
+ def int_cttz : Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>]>;
def int_part_select :
- Intrinsic<[llvm_int_ty, llvm_int_ty, llvm_i32_ty, llvm_i32_ty]>;
+ Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty]>;
def int_part_set :
- Intrinsic<[llvm_int_ty, llvm_int_ty, llvm_int_ty, llvm_i32_ty,
+ Intrinsic<[llvm_anyint_ty, LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty,
llvm_i32_ty]>;
}
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index e85d150204..dedeb4edf3 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -448,13 +448,13 @@ llvm::ConstantFoldCall(Function *F, Constant** Operands, unsigned NumOperands) {
return ConstantInt::get(Op->getValue().byteSwap());
} else if (Name.size() > 11 && !memcmp(&Name[0],"llvm.ctpop",10)) {
uint64_t ctpop = Op->getValue().countPopulation();
- return ConstantInt::get(Type::Int32Ty, ctpop);
+ return ConstantInt::get(Ty, ctpop);
} else if (Name.size() > 10 && !memcmp(&Name[0], "llvm.cttz", 9)) {
uint64_t cttz = Op->getValue().countTrailingZeros();
- return ConstantInt::get(Type::Int32Ty, cttz);
+ return ConstantInt::get(Ty, cttz);
} else if (Name.size() > 10 && !memcmp(&Name[0], "llvm.ctlz", 9)) {
uint64_t ctlz = Op->getValue().countLeadingZeros();
- return ConstantInt::get(Type::Int32Ty, ctlz);
+ return ConstantInt::get(Ty, ctlz);
}
}
} else if (NumOperands == 2) {
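A worked example of the adjusted folds (a sketch): the bit count is now produced in the operand's own type, e.g. llvm.ctlz on an i29 constant:

  APInt Op(29, 2);                          // i29 value 2
  uint64_t ctlz = Op.countLeadingZeros();   // 27 leading zero bits
  // folds to ConstantInt::get(Ty, 27), where Ty is the i29 operand type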
diff --git a/lib/AsmParser/llvmAsmParser.y b/lib/AsmParser/llvmAsmParser.y
index f93fe06d7e..9d7b063d0c 100644
--- a/lib/AsmParser/llvmAsmParser.y
+++ b/lib/AsmParser/llvmAsmParser.y
@@ -18,6 +18,7 @@
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/ValueSymbolTable.h"
+#include "llvm/AutoUpgrade.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallVector.h"
@@ -131,6 +132,11 @@ static struct PerModuleInfo {
return;
}
+ // Look for intrinsic functions and CallInst that need to be upgraded
+ for (Module::iterator FI = CurrentModule->begin(),
+ FE = CurrentModule->end(); FI != FE; )
+ UpgradeCallsToIntrinsic(FI++); // must be post-increment, as we remove
+
Values.clear(); // Clear out function local definitions
Types.clear();
CurrentModule = 0;
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 9c1f49e865..07a4279e13 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -19,6 +19,7 @@
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/ParameterAttributes.h"
+#include "llvm/AutoUpgrade.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -857,6 +858,13 @@ bool BitcodeReader::ParseModule(const std::string &ModuleID) {
if (!FunctionsWithBodies.empty())
return Error("Too few function bodies found");
+ // Look for intrinsic functions which need to be upgraded at some point
+ for (Module::iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ if (Function* NewFn = UpgradeIntrinsicFunction(FI))
+ UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
+ }
+
// Force deallocation of memory for these vectors to favor the client that
// want lazy deserialization.
std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
@@ -1588,6 +1596,18 @@ bool BitcodeReader::materializeFunction(Function *F, std::string *ErrInfo) {
if (ErrInfo) *ErrInfo = ErrorString;
return true;
}
+
+ // Upgrade any old intrinsic calls in the function.
+ for (UpgradedIntrinsicMap::iterator I = UpgradedIntrinsics.begin(),
+ E = UpgradedIntrinsics.end(); I != E; ++I) {
+ if (I->first != I->second) {
+ for (Value::use_iterator UI = I->first->use_begin(),
+ UE = I->first->use_end(); UI != UE; ) {
+ if (CallInst* CI = dyn_cast<CallInst>(*UI++))
+ UpgradeIntrinsicCall(CI, I->second);
+ }
+ }
+ }
return false;
}
@@ -1614,6 +1634,25 @@ Module *BitcodeReader::materializeModule(std::string *ErrInfo) {
materializeFunction(F, ErrInfo))
return 0;
}
+
+ // Upgrade any intrinsic calls that slipped through (should not happen!) and
+ // delete the old functions to clean up. We can't do this unless the entire
+ // module is materialized because there could always be another function body
+ // with calls to the old function.
+ for (std::vector<std::pair<Function*, Function*> >::iterator I =
+ UpgradedIntrinsics.begin(), E = UpgradedIntrinsics.end(); I != E; ++I) {
+ if (I->first != I->second) {
+ for (Value::use_iterator UI = I->first->use_begin(),
+ UE = I->first->use_end(); UI != UE; ) {
+ if (CallInst* CI = dyn_cast<CallInst>(*UI++))
+ UpgradeIntrinsicCall(CI, I->second);
+ }
+ ValueList.replaceUsesOfWith(I->first, I->second);
+ I->first->eraseFromParent();
+ }
+ }
+ std::vector<std::pair<Function*, Function*> >().swap(UpgradedIntrinsics);
+
return TheModule;
}
diff --git a/lib/Bitcode/Reader/BitcodeReader.h b/lib/Bitcode/Reader/BitcodeReader.h
index 2f61b06c60..0655a1a91c 100644
--- a/lib/Bitcode/Reader/BitcodeReader.h
+++ b/lib/Bitcode/Reader/BitcodeReader.h
@@ -102,6 +102,11 @@ class BitcodeReader : public ModuleProvider {
// When reading the module header, this list is populated with functions that
// have bodies later in the file.
std::vector<Function*> FunctionsWithBodies;
+
+ // When intrinsic functions are encountered which require upgrading they are
+ // stored here with their replacement function.
+ typedef std::vector<std::pair<Function*, Function*> > UpgradedIntrinsicMap;
+ UpgradedIntrinsicMap UpgradedIntrinsics;
// After the module header has been read, the FunctionsWithBodies list is
// reversed. This keeps track of whether we've done this yet.
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index d1f7669024..afb681f9bd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2814,10 +2814,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SDOperand Arg = getValue(I.getOperand(1));
MVT::ValueType Ty = Arg.getValueType();
SDOperand result = DAG.getNode(ISD::CTTZ, Ty, Arg);
- if (Ty < MVT::i32)
- result = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, result);
- else if (Ty > MVT::i32)
- result = DAG.getNode(ISD::TRUNCATE, MVT::i32, result);
setValue(&I, result);
return 0;
}
@@ -2825,10 +2821,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SDOperand Arg = getValue(I.getOperand(1));
MVT::ValueType Ty = Arg.getValueType();
SDOperand result = DAG.getNode(ISD::CTLZ, Ty, Arg);
- if (Ty < MVT::i32)
- result = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, result);
- else if (Ty > MVT::i32)
- result = DAG.getNode(ISD::TRUNCATE, MVT::i32, result);
setValue(&I, result);
return 0;
}
@@ -2836,10 +2828,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SDOperand Arg = getValue(I.getOperand(1));
MVT::ValueType Ty = Arg.getValueType();
SDOperand result = DAG.getNode(ISD::CTPOP, Ty, Arg);
- if (Ty < MVT::i32)
- result = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, result);
- else if (Ty > MVT::i32)
- result = DAG.getNode(ISD::TRUNCATE, MVT::i32, result);
setValue(&I, result);
return 0;
}
diff --git a/lib/Target/X86/X86TargetAsmInfo.cpp b/lib/Target/X86/X86TargetAsmInfo.cpp
index 79df32b565..bf206b4806 100644
--- a/lib/Target/X86/X86TargetAsmInfo.cpp
+++ b/lib/Target/X86/X86TargetAsmInfo.cpp
@@ -219,9 +219,9 @@ bool X86TargetAsmInfo::LowerToBSwap(CallInst *CI) const {
return false;
// Okay, we can do this xform, do so now.
- const Type *Tys[] = { Ty, Ty };
+ const Type *Tys[] = { Ty };
Module *M = CI->getParent()->getParent()->getParent();
- Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 2);
+ Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
Value *Op = CI->getOperand(1);
Op = new CallInst(Int, Op, CI->getName(), CI);
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index e843074920..4330b16746 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -3717,9 +3717,9 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
if (ByteValues[i] != V)
return 0;
- const Type *Tys[] = { ITy, ITy };
+ const Type *Tys[] = { ITy };
Module *M = I.getParent()->getParent()->getParent();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 2);
+ Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
return new CallInst(F, V);
}
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
new file mode 100644
index 0000000000..b56fe70235
--- /dev/null
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -0,0 +1,197 @@
+//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by Chandler Carruth and is distributed under the
+// University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the auto-upgrade helper functions
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/AutoUpgrade.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Instructions.h"
+#include "llvm/ParameterAttributes.h"
+#include "llvm/Intrinsics.h"
+using namespace llvm;
+
+
+Function* llvm::UpgradeIntrinsicFunction(Function *F) {
+ assert(F && "Illegal to upgrade a non-existent Function.");
+
+ // Get the Function's name.
+ const std::string& Name = F->getName();
+
+ // Convenience
+ const FunctionType *FTy = F->getFunctionType();
+
+ // Quickly eliminate it, if it's not a candidate.
+ if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
+ Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
+ return 0;
+
+ Module *M = F->getParent();
+ switch (Name[5]) {
+ default: break;
+ case 'b':
+ // This upgrades the name of the llvm.bswap intrinsic function to only use
+ // a single type name for overloading. We only care about the old format
+ // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
+ // a '.' after 'bswap.'
+ if (Name.compare(5,6,"bswap.",6) == 0) {
+ std::string::size_type delim = Name.find('.',11);
+
+ if (delim != std::string::npos) {
+ // Construct the new name as 'llvm.bswap' + '.i*'
+ F->setName(Name.substr(0,10)+Name.substr(delim));
+ return F;
+ }
+ }
+ break;
+
+ case 'c':
+ // We only want to fix the 'llvm.ct*' intrinsics which do not have the
+ // correct return type, so we check for the name, and then check if the
+ // return type does not match the parameter type.
+ if ( (Name.compare(5,5,"ctpop",5) == 0 ||
+ Name.compare(5,4,"ctlz",4) == 0 ||
+ Name.compare(5,4,"cttz",4) == 0) &&
+ FTy->getReturnType() != FTy->getParamType(0)) {
+ // We first need to change the name of the old (bad) intrinsic, because
+ // its type is incorrect, but we cannot overload that name. We
+ // arbitrarily unique it here allowing us to construct a correctly named
+ // and typed function below.
+ F->setName("");
+
+ // Now construct the new intrinsic with the correct name and type. We
+ // leave the old function around in order to query its type, whatever it
+ // may be, and correctly convert up to the new type.
+ return cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getParamType(0),
+ FTy->getParamType(0),
+ (Type *)0));
+ }
+ break;
+
+ case 'p':
+ // This upgrades the llvm.part.select overloaded intrinsic names to only
+ // use one type specifier in the name. We only care about the old format
+ // 'llvm.part.select.i*.i*', and solve as above with bswap.
+ if (Name.compare(5,12,"part.select.",12) == 0) {
+ std::string::size_type delim = Name.find('.',17);
+
+ if (delim != std::string::npos) {
+ // Construct a new name as 'llvm.part.select' + '.i*'
+ F->setName(Name.substr(0,16)+Name.substr(delim));
+ return F;
+ }
+ break;
+ }
+
+ // This upgrades the llvm.part.set intrinsics similarly as above, however
+ // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
+ // must match. There is an additional type specifier after these two
+ // matching types that we must retain when upgrading. Thus, we require
+ // finding 2 periods, not just one, after the intrinsic name.
+ if (Name.compare(5,9,"part.set.",9) == 0) {
+ std::string::size_type delim = Name.find('.',14);
+
+ if (delim != std::string::npos &&
+ Name.find('.',delim+1) != std::string::npos) {
+ // Construct a new name as 'llvm.part.set' + '.i*.i*'
+ F->setName(Name.substr(0,13)+Name.substr(delim));
+ return F;
+ }
+ break;
+ }
+
+ break;
+ }
+
+ // This may not belong here. This function is effectively being overloaded
+ // to both detect an intrinsic which needs upgrading, and to provide the
+ // upgraded form of the intrinsic. We should perhaps have two separate
+ // functions for this.
+ return 0;
+}
+
+// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to the
+// upgraded intrinsic. All argument and return casting must be provided in
+// order to seamlessly integrate with existing context.
+void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
+ assert(NewFn && "Cannot upgrade an intrinsic call without a new function.");
+
+ Function *F = CI->getCalledFunction();
+ assert(F && "CallInst has no function associated with it.");
+
+ const FunctionType *FTy = F->getFunctionType();
+ const FunctionType *NewFnTy = NewFn->getFunctionType();
+
+ switch(NewFn->getIntrinsicID()) {
+ default: assert(0 && "Unknown function for CallInst upgrade.");
+ case Intrinsic::ctlz:
+ case Intrinsic::ctpop:
+ case Intrinsic::cttz:
+ // Build a small vector of the 1..(N-1) operands, which are the
+ // parameters.
+ SmallVector<Value*, 8> Operands(CI->op_begin()+1, CI->op_end());
+
+ // Construct a new CallInst
+ CallInst *NewCI = new CallInst(NewFn, Operands.begin(), Operands.end(),
+ "upgraded."+CI->getName(), CI);
+ NewCI->setTailCall(CI->isTailCall());
+ NewCI->setCallingConv(CI->getCallingConv());
+
+ // Handle any uses of the old CallInst.
+ if (!CI->use_empty()) {
+ // Check for sign extend parameter attributes on the return values.
+ bool SrcSExt = NewFnTy->getParamAttrs() &&
+ NewFnTy->getParamAttrs()->paramHasAttr(0,ParamAttr::SExt);
+ bool DestSExt = FTy->getParamAttrs() &&
+ FTy->getParamAttrs()->paramHasAttr(0,ParamAttr::SExt);
+
+ // Construct an appropriate cast from the new return type to the old.
+ CastInst *RetCast = CastInst::create(
+ CastInst::getCastOpcode(NewCI, SrcSExt,
+ F->getReturnType(),
+ DestSExt),
+ NewCI, F->getReturnType(),
+ NewCI->getName(), CI);
+ NewCI->moveBefore(RetCast);
+
+ // Replace all uses of the old call with the new cast which has the
+ // correct type.
+ CI->replaceAllUsesWith(RetCast);
+ }
+
+ // Clean up the old call now that it has been completely upgraded.
+ CI->eraseFromParent();
+ break;
+ }
+}
+
+// This tests each Function to determine if it needs upgrading. When we find
+// one we are interested in, we then upgrade all calls to reflect the new
+// function.
+void llvm::UpgradeCallsToIntrinsic(Function* F) {
+ assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
+
+ // Upgrade the function and check if it is a totally new function.
+ if (Function* NewFn = UpgradeIntrinsicFunction(F)) {
+ if (NewFn != F) {
+ // Replace all uses of the old function with the new one if necessary.
+ for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
+ UI != UE; ) {
+ if (CallInst* CI = dyn_cast<CallInst>(*UI++))
+ UpgradeIntrinsicCall(CI, NewFn);
+ }
+ // Remove old function, no longer used, from the module.
+ F->eraseFromParent();
+ }
+ }
+}
+
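To make the name rewriting above concrete, a small worked example of the bswap case (the string literal is illustrative only):

  std::string Name = "llvm.bswap.i32.i32";
  std::string::size_type delim = Name.find('.', 11);  // second type suffix
  std::string NewName = Name.substr(0, 10) + Name.substr(delim);
  // NewName == "llvm.bswap.i32"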
diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp
index 6d4aa986f7..86a72c44fe 100644
--- a/lib/VMCore/Verifier.cpp
+++ b/lib/VMCore/Verifier.cpp
@@ -53,6 +53,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/Streams.h"
@@ -225,7 +226,8 @@ namespace { // Anonymous namespace for class
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
- void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F, ...);
+ void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
+ unsigned Count, ...);
void WriteValue(const Value *V) {
if (!V) return;
@@ -1075,9 +1077,11 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
/// VerifyIntrinsicPrototype - TableGen emits calls to this function into
/// Intrinsics.gen. This implements a little state machine that verifies the
/// prototype of intrinsics.
-void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F, ...) {
+void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID,
+ Function *F,
+ unsigned Count, ...) {
va_list VA;
- va_start(VA, F);
+ va_start(VA, Count);
const FunctionType *FTy = F->getFunctionType();
@@ -1086,97 +1090,94 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F, ...) {
// suffix, to be checked at the end.
std::string Suffix;
- // Note that "arg#0" is the return type.
- for (unsigned ArgNo = 0; 1; ++ArgNo) {
- int TypeID = va_arg(VA, int);
+ if (FTy->getNumParams() + FTy->isVarArg() != Count - 1) {
+ CheckFailed("Intrinsic prototype has incorrect number of arguments!", F);
+ return;
+ }
- if (TypeID == -2) {
- break;
- }
+ // Note that "arg#0" is the return type.
+ for (unsigned ArgNo = 0; ArgNo < Count; ++ArgNo) {
+ MVT::ValueType VT = va_arg(VA, MVT::ValueType);
- if (TypeID == -1) {
- if (ArgNo != FTy->getNumParams()+1)
- CheckFailed("Intrinsic prototype has too many arguments!", F);
+ if (VT == MVT::isVoid && ArgNo > 0) {
+ if (!FTy->isVarArg())
+ CheckFailed("Intrinsic prototype has no '...'!", F);
break;
}
- if (ArgNo == FTy->getNumParams()+1) {
- CheckFailed("Intrinsic prototype has too few arguments!", F);
- break;
- }
-
const Type *Ty;
if (ArgNo == 0)
Ty = FTy->getReturnType();
else
Ty = FTy->getParamType(ArgNo-1);
-
- if (TypeID != Ty->getTypeID()) {
- if (ArgNo == 0)
- CheckFailed("Intrinsic prototype has incorrect result type!", F);
- else
- CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is wrong!",F);
- break;
- }
- if (TypeID == Type::IntegerTyID) {
- unsigned ExpectedBits = (unsigned) va_arg(VA, int);
- unsigned GotBits = cast<IntegerType>(Ty)->getBitWidth();
- if (ExpectedBits == 0) {
- Suffix += ".i" + utostr(GotBits);
- } else if (GotBits != ExpectedBits) {
- std::string bitmsg = " Expected " + utostr(ExpectedBits) + " but got "+
- utostr(GotBits) + " bits.";
- if (ArgNo == 0)
- CheckFailed("Intrinsic prototype has incorrect integer result width!"
- + bitmsg, F);
- else
- CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " has "
- "incorrect integer width!" + bitmsg, F);
- break;
+ unsigned NumElts = 0;
+ const Type *EltTy = Ty;
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ EltTy = VTy->getElementType();
+ NumElts = VTy->getNumElements();
+ }
+
+ if ((int)VT < 0) {
+ int Match = ~VT;
+ if (Match == 0) {
+ if (Ty != FTy->getReturnType()) {
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " does not "
+ "match return type.", F);
+ break;
+ }
+ } else {
+ if (Ty != FTy->getParamType(Match-1)) {
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " does not "
+ "match parameter %" + utostr(Match-1) + ".", F);
+ break;
+ }
}
+ } else if (VT == MVT::iAny) {
+ unsigned GotBits = cast<IntegerType>(EltTy)->getBitWidth();
+ Suffix += ".";
+ if (EltTy != Ty)
+ Suffix += "v" + utostr(NumElts);
+ Suffix += "i" + utostr(GotBits);
// Check some constraints on various intrinsics.
switch (ID) {
default: break; // Not everything needs to be checked.
case Intrinsic::bswap:
if (GotBits < 16 || GotBits % 16 != 0)
CheckFailed("Intrinsic requires even byte width argument", F);
- /* FALL THROUGH */
- case Intrinsic::part_set:
- case Intrinsic::part_select:
- if (ArgNo == 1) {
- unsigned ResultBits =
- cast<IntegerType>(FTy->getReturnType())->getBitWidth();
- if (GotBits != ResultBits)
- CheckFailed("Intrinsic requires the bit widths of the first "
- "parameter and the result to match", F);
- }
break;
}
- } else if (TypeID == Type::VectorTyID) {
+ } else if (VT == MVT::iPTR) {
+ if (!isa<PointerType>(Ty)) {
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is not a "
+ "pointer and a pointer is required.", F);
+ break;
+ }
+ } else if (MVT::isVector(VT)) {
// If this is a vector argument, verify the number and type of elements.
- const VectorType *PTy = cast<VectorType>(Ty);
- int ElemTy = va_arg(VA, int);
- if (ElemTy != PTy->getElementType()->getTypeID()) {
+ if (MVT::getVectorElementType(VT) != MVT::getValueType(EltTy)) {
CheckFailed("Intrinsic prototype has incorrect vector element type!",
F);
break;
}
- if (ElemTy == Type::IntegerTyID) {
- unsigned NumBits = (unsigned)va_arg(VA, int);
- unsigned ExpectedBits =
- cast<IntegerType>(PTy->getElementType())->getBitWidth();
- if (NumBits != ExpectedBits) {
- CheckFailed("Intrinsic prototype has incorrect vector element type!",
- F);
- break;
- }
- }
- if ((unsigned)va_arg(VA, int) != PTy->getNumElements()) {
+ if (MVT::getVectorNumElements(VT) != NumElts) {
CheckFailed("Intrinsic prototype has incorrect number of "
"vector elements!",F);
- break;
+ break;
}
+ } else if (MVT::getTypeForValueType(VT) != EltTy) {
+ if (ArgNo == 0)
+ CheckFailed("Intrinsic prototype has incorrect result type!", F);
+ else
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is wrong!",F);
+ break;
+ } else if (EltTy != Ty) {
+ if (ArgNo == 0)
+ CheckFailed("Intrinsic result type is vector "
+ "and a scalar is required.", F);
+ else
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is vector "
+ "and a scalar is required.", F);
}
}
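A short sketch of the marker encoding this routine decodes (the producing side is in utils/TableGen/IntrinsicEmitter.cpp below): matched positions arrive as bitwise complements, so they are always negative and round-trip back to the index:

  int Marker = ~0;        // what TableGen emits for LLVMMatchType<0>
  int Match  = ~Marker;   // 0 again: "must match the return type"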
diff --git a/test/Assembler/AutoUpgradeIntrinsics.ll b/test/Assembler/AutoUpgradeIntrinsics.ll
new file mode 100644
index 0000000000..fad3d4e4e8
--- /dev/null
+++ b/test/Assembler/AutoUpgradeIntrinsics.ll
@@ -0,0 +1,52 @@
+; Tests to make sure intrinsics are automatically upgraded.
+; RUN: llvm-as < %s | llvm-dis | not grep {i32 @llvm\\.ct}
+; RUN: llvm-as < %s | llvm-dis | \
+; RUN: not grep {llvm\\.part\\.set\\.i\[0-9\]*\\.i\[0-9\]*\\.i\[0-9\]*}
+; RUN: llvm-as < %s | llvm-dis | \
+; RUN: not grep {llvm\\.part\\.select\\.i\[0-9\]*\\.i\[0-9\]*}
+; RUN: llvm-as < %s | llvm-dis | \
+; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
+
+declare i32 @llvm.ctpop.i28(i28 %val)
+declare i32 @llvm.cttz.i29(i29 %val)
+declare i32 @llvm.ctlz.i30(i30 %val)
+
+define i32 @test_ct(i32 %A) {
+ %c1 = call i32 @llvm.ctpop.i28(i28 1234)
+ %c2 = call i32 @llvm.cttz.i29(i29 2345)
+ %c3 = call i32 @llvm.ctlz.i30(i30 3456)
+ %r1 = add i32 %c1, %c2
+ %r2 = add i32 %r1, %c3
+ ret i32 %r2
+}
+
+declare i32 @llvm.part.set.i32.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
+declare i16 @llvm.part.set.i16.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
+define i32 @test_part_set(i32 %A, i16 %B) {
+ %a = call i32 @llvm.part.set.i32.i32.i32(i32 %A, i32 27, i32 8, i32 0)
+ %b = call i16 @llvm.part.set.i16.i16.i16(i16 %B, i16 27, i32 8, i32 0)
+ %c = zext i16 %b to i32
+ %d = add i32 %a, %c
+ ret i32 %d
+}
+
+declare i32 @llvm.part.select.i32.i32(i32 %x, i32 %hi, i32 %lo)
+declare i16 @llvm.part.select.i16.i16(i16 %x, i32 %hi, i32 %lo)
+define i32 @test_part_select(i32 %A, i16 %B) {
+ %a = call i32 @llvm.part.select.i32.i32(i32 %A, i32 8, i32 0)
+ %b = call i16 @llvm.part.select.i16.i16(i16 %B, i32 8, i32 0)
+ %c = zext i16 %b to i32
+ %d = add i32 %a, %c
+ ret i32 %d
+}
+
+declare i32 @llvm.bswap.i32.i32(i32 %x)
+declare i16 @llvm.bswap.i16.i16(i16 %x)
+define i32 @test_bswap(i32 %A, i16 %B) {
+ %a = call i32 @llvm.bswap.i32.i32(i32 %A)
+ %b = call i16 @llvm.bswap.i16.i16(i16 %B)
+ %c = zext i16 %b to i32
+ %d = add i32 %a, %c
+ ret i32 %d
+}
+
diff --git a/test/Bitcode/AutoUpgradeIntrinsics.ll b/test/Bitcode/AutoUpgradeIntrinsics.ll
new file mode 100644
index 0000000000..5f9bcd56f1
--- /dev/null
+++ b/test/Bitcode/AutoUpgradeIntrinsics.ll
@@ -0,0 +1,10 @@
+; This isn't really an assembly file. It just runs tests on bitcode to ensure
+; it is auto-upgraded.
+; RUN: llvm-dis < %s.bc | not grep {i32 @llvm\\.ct}
+; RUN: llvm-dis < %s.bc | \
+; RUN: not grep {llvm\\.part\\.set\\.i\[0-9\]*\\.i\[0-9\]*\\.i\[0-9\]*}
+; RUN: llvm-dis < %s.bc | \
+; RUN: not grep {llvm\\.part\\.select\\.i\[0-9\]*\\.i\[0-9\]*}
+; RUN: llvm-dis < %s.bc | \
+; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
+
diff --git a/test/Bitcode/AutoUpgradeIntrinsics.ll.bc b/test/Bitcode/AutoUpgradeIntrinsics.ll.bc
new file mode 100644
index 0000000000..9de756ba68
--- /dev/null
+++ b/test/Bitcode/AutoUpgradeIntrinsics.ll.bc
Binary files differ
diff --git a/test/CodeGen/Alpha/ctlz.ll b/test/CodeGen/Alpha/ctlz.ll
index 0ad014dbf9..fba60227f8 100644
--- a/test/CodeGen/Alpha/ctlz.ll
+++ b/test/CodeGen/Alpha/ctlz.ll
@@ -5,10 +5,11 @@
; RUN: llvm-as < %s | llc -march=alpha -mcpu=ev56 | not grep -i ctlz
; RUN: llvm-as < %s | llc -march=alpha -mattr=-CIX | not grep -i ctlz
-declare i32 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8)
define i32 @bar(i8 %x) {
entry:
- %tmp.1 = call i32 @llvm.ctlz.i8( i8 %x )
- ret i32 %tmp.1
+ %tmp.1 = call i8 @llvm.ctlz.i8( i8 %x )
+ %tmp.2 = sext i8 %tmp.1 to i32
+ ret i32 %tmp.2
}
diff --git a/test/CodeGen/Generic/bit-intrinsics.ll b/test/CodeGen/Generic/bit-intrinsics.ll
index 427387f973..95410114bf 100644
--- a/test/CodeGen/Generic/bit-intrinsics.ll
+++ b/test/CodeGen/Generic/bit-intrinsics.ll
@@ -3,21 +3,21 @@
; RUN: llvm-as < %s > %t.bc
; RUN: lli --force-interpreter=true %t.bc
-declare i32 @llvm.part.set.i32.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
-declare i16 @llvm.part.set.i16.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
+declare i32 @llvm.part.set.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
+declare i16 @llvm.part.set.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
define i32 @test_part_set(i32 %A, i16 %B) {
- %a = call i32 @llvm.part.set.i32.i32.i32(i32 %A, i32 27, i32 8, i32 0)
- %b = call i16 @llvm.part.set.i16.i16.i16(i16 %B, i16 27, i32 8, i32 0)
+ %a = call i32 @llvm.part.set.i32.i32(i32 %A, i32 27, i32 8, i32 0)
+ %b = call i16 @llvm.part.set.i16.i16(i16 %B, i16 27, i32 8, i32 0)
%c = zext i16 %b to i32
%d = add i32 %a, %c
ret i32 %d
}
-declare i32 @llvm.part.select.i32.i32(i32 %x, i32 %hi, i32 %lo)
-declare i16 @llvm.part.select.i16.i16(i16 %x, i32 %hi, i32 %lo)
+declare i32 @llvm.part.select.i32(i32 %x, i32 %hi, i32 %lo)
+declare i16 @llvm.part.select.i16(i16 %x, i32 %hi, i32 %lo)
define i32 @test_part_select(i32 %A, i16 %B) {
- %a = call i32 @llvm.part.select.i32.i32(i32 %A, i32 8, i32 0)
- %b = call i16 @llvm.part.select.i16.i16(i16 %B, i32 8, i32 0)
+ %a = call i32 @llvm.part.select.i32(i32 %A, i32 8, i32 0)
+ %b = call i16 @llvm.part.select.i16(i16 %B, i32 8, i32 0)
%c = zext i16 %b to i32
%d = add i32 %a, %c
ret i32 %d
diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
index 1ea61746bb..098e7484e1 100644
--- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
@@ -2,10 +2,11 @@
define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) {
%tmp19 = load i64* %t
- %tmp23 = tail call i32 @llvm.ctlz.i64( i64 %tmp19 ) ; <i64> [#uses=1]
+ %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19 ) ; <i64> [#uses=1]
+ %tmp23 = trunc i64 %tmp22 to i32
%tmp89 = add i32 %tmp23, -64 ; <i32> [#uses=1]
%tmp90 = add i32 %tmp89, 0 ; <i32> [#uses=1]
ret i32 %tmp90
}
-declare i32 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64)
diff --git a/test/Feature/llvm2cpp.ll b/test/Feature/llvm2cpp.ll
index 88cfa34380..5c381c728e 100644
--- a/test/Feature/llvm2cpp.ll
+++ b/test/Feature/llvm2cpp.ll
@@ -403,30 +403,6 @@ declare i1 @llvm.isunordered.f64(double, double)
declare void @llvm.prefetch(i8*, i32, i32)
-declare i32 @upgrd.rm.llvm.ctpop.i8(i8)
-
-declare i32 @upgrd.rm.llvm.ctpop.i16(i16)
-
-declare i32 @upgrd.rm.llvm.ctpop.i32(i32)
-
-declare i32 @upgrd.rm.llvm.ctpop.i64(i64)
-
-declare i32 @upgrd.rm.llvm.cttz.i8(i8)
-
-declare i32 @upgrd.rm.llvm.cttz.i16(i16)
-
-declare i32 @upgrd.rm.llvm.cttz.i32(i32)
-
-declare i32 @upgrd.rm.llvm.cttz.i64(i64)
-
-declare i32 @upgrd.rm.llvm.ctlz.i8(i8)
-
-declare i32 @upgrd.rm.llvm.ctlz.i16(i16)
-
-declare i32 @upgrd.rm.llvm.ctlz.i32(i32)
-
-declare i32 @upgrd.rm.llvm.ctlz.i64(i64)
-
declare float @llvm.sqrt.f32(float)
declare double @llvm.sqrt.f64(double)
@@ -437,56 +413,44 @@ define void @libm() {
call void @llvm.prefetch( i8* null, i32 1, i32 3 )
call float @llvm.sqrt.f32( float 5.000000e+00 ) ; <float>:3 [#uses=0]
call double @llvm.sqrt.f64( double 6.000000e+00 ) ; <double>:4 [#uses=0]
- call i32 @llvm.ctpop.i8( i8 10 ) ; <i32>:5 [#uses=1]
- bitcast i32 %5 to i32 ; <i32>:6 [#uses=0]
- call i32 @llvm.ctpop.i16( i16 11 ) ; <i32>:7 [#uses=1]
- bitcast i32 %7 to i32 ; <i32>:8 [#uses=0]
+ call i8 @llvm.ctpop.i8( i8 10 ) ; <i32>:5 [#uses=1]
+ call i16 @llvm.ctpop.i16( i16 11 ) ; <i32>:7 [#uses=1]
call i32 @llvm.ctpop.i32( i32 12 ) ; <i32>:9 [#uses=1]
- bitcast i32 %9 to i32 ; <i32>:10 [#uses=0]
- call i32 @llvm.ctpop.i64( i64 13 ) ; <i32>:11 [#uses=1]
- bitcast i32 %11 to i32 ; <i32>:12 [#uses=0]
- call i32 @llvm.ctlz.i8( i8 14 ) ; <i32>:13 [#uses=1]
- bitcast i32 %13 to i32 ; <i32>:14 [#uses=0]
- call i32 @llvm.ctlz.i16( i16 15 ) ; <i32>:15 [#uses=1]
- bitcast i32 %15 to i32 ; <i32>:16 [#uses=0]
+ call i64 @llvm.ctpop.i64( i64 13 ) ; <i32>:11 [#uses=1]
+ call i8 @llvm.ctlz.i8( i8 14 ) ; <i32>:13 [#uses=1]
+ call i16 @llvm.ctlz.i16( i16 15 ) ; <i32>:15 [#uses=1]
call i32 @llvm.ctlz.i32( i32 16 ) ; <i32>:17 [#uses=1]
- bitcast i32 %17 to i32 ; <i32>:18 [#uses=0]
- call i32 @llvm.ctlz.i64( i64 17 ) ; <i32>:19 [#uses=1]
- bitcast i32 %19 to i32 ; <i32>:20 [#uses=0]
- call i32 @llvm.cttz.i8( i8 18 ) ; <i32>:21 [#uses=1]
- bitcast i32 %21 to i32 ; <i32>:22 [#uses=0]
- call i32 @llvm.cttz.i16( i16 19 ) ; <i32>:23 [#uses=1]
- bitcast i32 %23 to i32 ; <i32>:24 [#uses=0]
+ call i64 @llvm.ctlz.i64( i64 17 ) ; <i32>:19 [#uses=1]
+ call i8 @llvm.cttz.i8( i8 18 ) ; <i32>:21 [#uses=1]
+ call i16 @llvm.cttz.i16( i16 19 ) ; <i32>:23 [#uses=1]
call i32 @llvm.cttz.i32( i32 20 ) ; <i32>:25 [#uses=1]
- bitcast i32 %25 to i32 ; <i32>:26 [#uses=0]
- call i32 @llvm.cttz.i64( i64 21 ) ; <i32>:27 [#uses=1]
- bitcast i32 %27 to i32 ; <i32>:28 [#uses=0]
+ call i64 @llvm.cttz.i64( i64 21 ) ; <i32>:27 [#uses=1]
ret void
}
-declare i32 @llvm.ctpop.i8(i8)
+declare i8 @llvm.ctpop.i8(i8)
-declare i32 @llvm.ctpop.i16(i16)
+declare i16 @llvm.ctpop.i16(i16)
declare i32 @llvm.ctpop.i32(i32)
-declare i32 @llvm.ctpop.i64(i64)
+declare i64 @llvm.ctpop.i64(i64)
-declare i32 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8)
-declare i32 @llvm.ctlz.i16(i16)
+declare i16 @llvm.ctlz.i16(i16)
declare i32 @llvm.ctlz.i32(i32)
-declare i32 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64)
-declare i32 @llvm.cttz.i8(i8)
+declare i8 @llvm.cttz.i8(i8)
-declare i32 @llvm.cttz.i16(i16)
+declare i16 @llvm.cttz.i16(i16)
declare i32 @llvm.cttz.i32(i32)
-declare i32 @llvm.cttz.i64(i64)
+declare i64 @llvm.cttz.i64(i64)
; ModuleID = 'packed.ll'
@foo1 = external global <4 x float> ; <<4 x float>*> [#uses=2]
diff --git a/test/Transforms/InstCombine/bitcount.ll b/test/Transforms/InstCombine/bitcount.ll
index da539200e7..8ebf289eb7 100644
--- a/test/Transforms/InstCombine/bitcount.ll
+++ b/test/Transforms/InstCombine/bitcount.ll
@@ -3,15 +3,17 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
; RUN: grep -v declare | not grep llvm.ct
-declare i32 @llvm.ctpop.i31(i31 %val)
+declare i31 @llvm.ctpop.i31(i31 %val)
declare i32 @llvm.cttz.i32(i32 %val)
-declare i32 @llvm.ctlz.i33(i33 %val)
+declare i33 @llvm.ctlz.i33(i33 %val)
define i32 @test(i32 %A) {
- %c1 = call i32 @llvm.ctpop.i31(i31 12415124)
+ %c1 = call i31 @llvm.ctpop.i31(i31 12415124)
%c2 = call i32 @llvm.cttz.i32(i32 87359874)
- %c3 = call i32 @llvm.ctlz.i33(i33 87359874)
- %r1 = add i32 %c1, %c2
- %r2 = add i32 %r1, %c3
+ %c3 = call i33 @llvm.ctlz.i33(i33 87359874)
+ %t1 = zext i31 %c1 to i32
+ %t3 = trunc i33 %c3 to i32
+ %r1 = add i32 %t1, %c2
+ %r2 = add i32 %r1, %t3
ret i32 %r2
}
diff --git a/tools/llvm-upgrade/UpgradeParser.y b/tools/llvm-upgrade/UpgradeParser.y
index c9b3e6a7dc..8fe549830d 100644
--- a/tools/llvm-upgrade/UpgradeParser.y
+++ b/tools/llvm-upgrade/UpgradeParser.y
@@ -1472,34 +1472,6 @@ upgradeIntrinsicCall(const Type* RetTy, const ValID &ID,
return new FCmpInst(FCmpInst::FCMP_UNO, Args[0], Args[1]);
}
break;
- case 'b':
- if (Name.length() == 14 && !memcmp(&Name[5], "bswap.i", 7)) {
- const Type* ArgTy = Args[0]->getType();
- Name += ".i" + utostr(cast<IntegerType>(ArgTy)->getBitWidth());
- Function *F = cast<Function>(
- CurModule.CurrentModule->getOrInsertFunction(Name, RetTy, ArgTy,
- (void*)0));
- return new CallInst(F, Args[0]);
- }
- break;
- case 'c':
- if ((Name.length() <= 14 && !memcmp(&Name[5], "ctpop.i", 7)) ||
- (Name.length() <= 13 && !memcmp(&Name[5], "ctlz.i", 6)) ||
- (Name.length() <= 13 && !memcmp(&Name[5], "cttz.i", 6))) {
- // These intrinsics changed their result type.
- const Type* ArgTy = Args[0]->getType();
- Function *OldF = CurModule.CurrentModule->getFunction(Name);
- if (OldF)
- OldF->setName("upgrd.rm." + Name);
-
- Function *NewF = cast<Function>(
- CurModule.CurrentModule->getOrInsertFunction(Name, Type::Int32Ty,
- ArgTy, (void*)0));
-
- Instruction *Call = new CallInst(NewF, Args[0], "", CurBB);
- return CastInst::createIntegerCast(Call, RetTy, false);
- }
- break;
case 'v' : {
const Type* PtrTy = PointerType::get(Type::Int8Ty);
diff --git a/utils/TableGen/CodeGenIntrinsics.h b/utils/TableGen/CodeGenIntrinsics.h
index 57f85ad422..8b2ead443f 100644
--- a/utils/TableGen/CodeGenIntrinsics.h
+++ b/utils/TableGen/CodeGenIntrinsics.h
@@ -30,10 +30,6 @@ namespace llvm {
std::string GCCBuiltinName;// Name of the corresponding GCC builtin, or "".
std::string TargetPrefix; // Target prefix, e.g. "ppc" for t-s intrinsics.
- /// ArgTypes - The type primitive enum value for the return value and all
- /// of the arguments. These are things like Type::IntegerTyID.
- std::vector<std::string> ArgTypes;
-
/// ArgVTs - The MVT::ValueType for each argument type. Note that this list
/// is only populated when in the context of a target .td file. When
/// building Intrinsics.td, this isn't available, because we don't know the
diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp
index 21136c4645..78b850b43c 100644
--- a/utils/TableGen/CodeGenTarget.cpp
+++ b/utils/TableGen/CodeGenTarget.cpp
@@ -97,7 +97,7 @@ std::string llvm::getEnumName(MVT::ValueType T) {
case MVT::v2f64: return "MVT::v2f64";
case MVT::v3i32: return "MVT::v3i32";
case MVT::v3f32: return "MVT::v3f32";
- case MVT::iPTR: return "TLI.getPointerTy()";
+ case MVT::iPTR: return "MVT::iPTR";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@@ -651,13 +651,12 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R, CodeGenTarget *CGT) {
for (unsigned i = 0, e = TypeList->getSize(); i != e; ++i) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
- ArgTypes.push_back(TyEl->getValueAsString("TypeVal"));
MVT::ValueType VT = getValueType(TyEl->getValueAsDef("VT"));
isOverloaded |= VT == MVT::iAny;
ArgVTs.push_back(VT);
ArgTypeDefs.push_back(TyEl);
}
- if (ArgTypes.size() == 0)
+ if (ArgVTs.size() == 0)
throw "Intrinsic '"+DefName+"' needs at least a type for the ret value!";
diff --git a/utils/TableGen/IntrinsicEmitter.cpp b/utils/TableGen/IntrinsicEmitter.cpp
index 3c11227785..ebc0c32692 100644
--- a/utils/TableGen/IntrinsicEmitter.cpp
+++ b/utils/TableGen/IntrinsicEmitter.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CodeGenTarget.h"
#include "IntrinsicEmitter.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
@@ -94,12 +95,14 @@ EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
// For overloaded intrinsics, only the prefix needs to match
if (Ints[I->second].isOverloaded)
- OS << " if (Len >= " << I->first.size()
- << " && !memcmp(Name, \"" << I->first << "\", " << I->first.size()
- << ")) return Intrinsic::" << Ints[I->second].EnumName << ";\n";
+ OS << " if (Len > " << I->first.size()
+ << " && !memcmp(Name, \"" << I->first << ".\", "
+ << (I->first.size() + 1) << ")) return Intrinsic::"
+ << Ints[I->second].EnumName << ";\n";
else
OS << " if (Len == " << I->first.size()
- << " && !memcmp(Name, \"" << I->first << "\", Len)) return Intrinsic::"
+ << " && !memcmp(Name, \"" << I->first << "\", "
+ << I->first.size() << ")) return Intrinsic::"
<< Ints[I->second].EnumName << ";\n";
}
OS << " }\n";
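For reference, a sketch of the kind of line the recognizer now prints for an overloaded entry (the concrete name string and lengths come from the table, so these literals are only illustrative); requiring the trailing period keeps a bare or differently spelled name from matching the prefix:

  if (Len > 10 && !memcmp(Name, "llvm.ctpop.", 11)) return Intrinsic::ctpop;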
@@ -117,50 +120,55 @@ EmitIntrinsicToNameTable(const std::vector<CodeGenIntrinsic> &Ints,
OS << "#endif\n\n";
}
-static bool EmitTypeVerify(std::ostream &OS, Record *ArgType) {
- if (ArgType->getValueAsString("TypeVal") == "...") return true;
-
- OS << "(int)" << ArgType->getValueAsString("TypeVal") << ", ";
- // If this is an integer type, check the width is correct.
- if (ArgType->isSubClassOf("LLVMIntegerType"))
- OS << ArgType->getValueAsInt("Width") << ", ";
-
- // If this is a vector type, check that the subtype and size are correct.
- else if (ArgType->isSubClassOf("LLVMVectorType")) {
- EmitTypeVerify(OS, ArgType->getValueAsDef("ElTy"));
- OS << ArgType->getValueAsInt("NumElts") << ", ";
+static void EmitTypeForValueType(std::ostream &OS, MVT::ValueType VT) {
+ if (MVT::isInteger(VT)) {
+ unsigned BitWidth = MVT::getSizeInBits(VT);
+ OS << "IntegerType::get(" << BitWidth << ")";
+ } else if (VT == MVT::Other) {
+ // MVT::OtherVT is used to mean the empty struct type here.
+ OS << "StructType::get(std::vector<const Type *>())";
+ } else if (VT == MVT::f32) {
+ OS << "Type::FloatTy";
+ } else if (VT == MVT::f64) {
+ OS << "Type::DoubleTy";
+ } else if (VT == MVT::isVoid) {
+ OS << "Type::VoidTy";
+ } else {
+ assert(false && "Unsupported ValueType!");
}
-
- return false;
}
static void EmitTypeGenerate(std::ostream &OS, Record *ArgType,
unsigned &ArgNo) {
- if (ArgType->isSubClassOf("LLVMIntegerType")) {
- unsigned BitWidth = ArgType->getValueAsInt("Width");
+ MVT::ValueType VT = getValueType(ArgType->getValueAsDef("VT"));
+
+ if (ArgType->isSubClassOf("LLVMMatchType")) {
+ unsigned Number = ArgType->getValueAsInt("Number");
+ assert(Number < ArgNo && "Invalid matching number!");
+ OS << "Tys[" << Number << "]";
+ } else if (VT == MVT::iAny) {
// NOTE: The ArgNo variable here is not the absolute argument number, it is
// the index of the "arbitrary" type in the Tys array passed to the
// Intrinsic::getDeclaration function. Consequently, we only want to
- // increment it when we actually hit an arbitrary integer type which is
- // identified by BitWidth == 0. Getting this wrong leads to very subtle
- // bugs!
- if (BitWidth == 0)
- OS << "Tys[" << ArgNo++ << "]";
- else
- OS << "IntegerType::get(" << BitWidth << ")";
- } else if (ArgType->isSubClassOf("LLVMVectorType")) {
+ // increment it when we actually hit an overloaded type. Getting this wrong
+ // leads to very subtle bugs!
+ OS << "Tys[" << ArgNo++ << "]";
+ } else if (MVT::isVector(VT)) {
OS << "VectorType::get(";
- EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
- OS << ", " << ArgType->getValueAsInt("NumElts") << ")";
- } else if (ArgType->isSubClassOf("LLVMPointerType")) {
+ EmitTypeForValueType(OS, MVT::getVectorElementType(VT));
+ OS << ", " << MVT::getVectorNumElements(VT) << ")";
+ } else if (VT == MVT::iPTR) {
OS << "PointerType::get(";
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
- } else if (ArgType->isSubClassOf("LLVMEmptyStructType")) {
- OS << "StructType::get(std::vector<const Type *>())";
+ } else if (VT == MVT::isVoid) {
+ if (ArgNo == 0)
+ OS << "Type::VoidTy";
+ else
+ // MVT::isVoid is used to mean varargs here.
+ OS << "...";
} else {
- OS << "Type::getPrimitiveType(";
- OS << ArgType->getValueAsString("TypeVal") << ")";
+ EmitTypeForValueType(OS, VT);
}
}
@@ -209,18 +217,24 @@ void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
}
const std::vector<Record*> &ArgTypes = I->first;
- OS << " VerifyIntrinsicPrototype(ID, IF, ";
- bool VarArg = false;
+ OS << " VerifyIntrinsicPrototype(ID, IF, " << ArgTypes.size() << ", ";
for (unsigned j = 0; j != ArgTypes.size(); ++j) {
- VarArg = EmitTypeVerify(OS, ArgTypes[j]);
- if (VarArg) {
- if ((j+1) != ArgTypes.size())
+ Record *ArgType = ArgTypes[j];
+ if (ArgType->isSubClassOf("LLVMMatchType")) {
+ unsigned Number = ArgType->getValueAsInt("Number");
+ assert(Number < j && "Invalid matching number!");
+ OS << "~" << Number;
+ } else {
+ MVT::ValueType VT = getValueType(ArgType->getValueAsDef("VT"));
+ OS << getEnumName(VT);
+ if (VT == MVT::isVoid && j != 0 && j != ArgTypes.size()-1)
throw "Var arg type not last argument";
- break;
}
+ if (j != ArgTypes.size()-1)
+ OS << ", ";
}
- OS << (VarArg ? "-2);\n" : "-1);\n");
+ OS << ");\n";
OS << " break;\n";
}
OS << " }\n";
@@ -255,7 +269,8 @@ void IntrinsicEmitter::EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
const std::vector<Record*> &ArgTypes = I->first;
unsigned N = ArgTypes.size();
- if (ArgTypes[N-1]->getValueAsString("TypeVal") == "...") {
+ if (N > 1 &&
+ getValueType(ArgTypes[N-1]->getValueAsDef("VT")) == MVT::isVoid) {
OS << " IsVarArg = true;\n";
--N;
}
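Putting the emitted pieces together, a sketch (under the assumptions above) of the verifier call this would generate for int_part_set, whose type list is [iAny, LLVMMatchType<0>, iAny, i32, i32]:

  VerifyIntrinsicPrototype(ID, IF, 5, MVT::iAny, ~0, MVT::iAny, MVT::i32, MVT::i32);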
diff --git a/utils/TableGen/RegisterInfoEmitter.cpp b/utils/TableGen/RegisterInfoEmitter.cpp
index a8690041bf..06c575da3a 100644
--- a/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/utils/TableGen/RegisterInfoEmitter.cpp
@@ -209,7 +209,7 @@ void RegisterInfoEmitter::run(std::ostream &OS) {
<< " static const MVT::ValueType " << Name
<< "[] = {\n ";
for (unsigned i = 0, e = RC.VTs.size(); i != e; ++i)
- OS << getName(RC.VTs[i]) << ", ";
+ OS << getEnumName(RC.VTs[i]) << ", ";
OS << "MVT::Other\n };\n\n";
}
OS << "} // end anonymous namespace\n\n";