summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp46
-rw-r--r--test/CodeGen/Mips/hf16call32_body.ll294
2 files changed, 337 insertions, 3 deletions
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index cc7324f26e..45dd5d7957 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -320,7 +320,7 @@ static void assureFPCallStub(Function &F, Module *M,
//
// Returns of float, double and complex need to be handled with a helper
-// function. The "AndCal" part is coming in a later patch.
+// function.
//
static bool fixupFPReturnAndCall
(Function &F, Module *M, const MipsSubtarget &Subtarget) {
@@ -378,6 +378,41 @@ static bool fixupFPReturnAndCall
return Modified;
}
+static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
+ const MipsSubtarget &Subtarget ) {
+ bool PicMode = Subtarget.getRelocationModel() == Reloc::PIC_;
+ bool LE = Subtarget.isLittle();
+ LLVMContext &Context = M->getContext();
+ std::string Name = F->getName();
+ std::string SectionName = ".mips16.fn." + Name;
+ std::string StubName = "__fn_stub_" + Name;
+ std::string LocalName = "__fn_local_" + Name;
+ Function *FStub = Function::Create
+ (F->getFunctionType(),
+ Function::ExternalLinkage, StubName, M);
+ FStub->addFnAttr("mips16_fp_stub");
+ FStub->addFnAttr(llvm::Attribute::Naked);
+ FStub->addFnAttr(llvm::Attribute::NoUnwind);
+ FStub->addFnAttr("nomips16");
+ FStub->setSection(SectionName);
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+ InlineAsmHelper IAH(Context, BB);
+ IAH.Out(" .set macro");
+ if (PicMode) {
+ IAH.Out(".set noreorder");
+ IAH.Out(".cpload $$2");
+ IAH.Out(".set reorder");
+ IAH.Out(".reloc 0,R_MIPS_NONE," + Name);
+ IAH.Out("la $$25," + LocalName);
+ }
+ else
+ IAH.Out("la $$25, " + Name);
+ swapFPIntParams(PV, M, IAH, LE, false);
+ IAH.Out("jr $$25");
+ IAH.Out(LocalName + " = " + Name);
+ new UnreachableInst(FStub->getContext(), BB);
+}
+
namespace llvm {
//
@@ -389,10 +424,10 @@ namespace llvm {
// by calling a helper function before the actual return.
// 2) generate helper functions (stubs) that can be called by mips32 functions
// that will move parameters passed normally passed in floating point
-// registers the soft float equivalents. (Coming in a later patch).
+// registers to the soft float equivalents.
// 3) in the case of static relocation, generate helper functions so that
// mips16 functions can call extern functions of unknown type (mips16 or
-// mips32). (Coming in a later patch).
+// mips32).
// 4) TBD. For pic, calls to extern functions of unknown type are handled by
// predefined helper functions in libc but this work is currently done
// during call lowering but it should be moved here in the future.
@@ -404,6 +439,11 @@ bool Mips16HardFloat::runOnModule(Module &M) {
if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
F->hasFnAttribute("nomips16")) continue;
Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+ FPParamVariant V = whichFPParamVariantNeeded(*F);
+ if (V != NoSig) {
+ Modified = true;
+ createFPFnStub(F, &M, V, Subtarget);
+ }
}
return Modified;
}
diff --git a/test/CodeGen/Mips/hf16call32_body.ll b/test/CodeGen/Mips/hf16call32_body.ll
new file mode 100644
index 0000000000..793b771ac4
--- /dev/null
+++ b/test/CodeGen/Mips/hf16call32_body.ll
@@ -0,0 +1,294 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+
+@x = external global float
+@xd = external global double
+@y = external global float
+@yd = external global double
+@ret_sf = external global float
+@ret_df = external global double
+@ret_sc = external global { float, float }
+@ret_dc = external global { double, double }
+
+; Function Attrs: nounwind
+define void @v_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ ret void
+}
+; stel: .section .mips16.fn.v_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_sf
+; stel: la $25, v_sf
+; stel: mfc1 $4,$f12
+; stel: jr $25
+; stel: __fn_local_v_sf = v_sf
+; stel: .end __fn_stub_v_sf
+
+declare i32 @printf(i8*, ...) #1
+
+; Function Attrs: nounwind
+define void @v_df(double %p) #0 {
+entry:
+ %p.addr = alloca double, align 8
+ store double %p, double* %p.addr, align 8
+ %0 = load double* %p.addr, align 8
+ store double %0, double* @xd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df,"ax",@progbits
+; stel: .ent __fn_stub_v_df
+; stel: la $25, v_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: jr $25
+; stel: __fn_local_v_df = v_df
+; stel: .end __fn_stub_v_df
+
+; Function Attrs: nounwind
+define void @v_sf_sf(float %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca float, align 4
+ store float %p1, float* %p1.addr, align 4
+ store float %p2, float* %p2.addr, align 4
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ ret void
+}
+
+; stel: .section .mips16.fn.v_sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_sf_sf
+; stel: la $25, v_sf_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f14
+; stel: jr $25
+; stel: __fn_local_v_sf_sf = v_sf_sf
+; stel: .end __fn_stub_v_sf_sf
+
+; Function Attrs: nounwind
+define void @v_sf_df(float %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca double, align 8
+ store float %p1, float* %p1.addr, align 4
+ store double %p2, double* %p2.addr, align 8
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_sf_df,"ax",@progbits
+; stel: .ent __fn_stub_v_sf_df
+; stel: la $25, v_sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_v_sf_df = v_sf_df
+; stel: .end __fn_stub_v_sf_df
+
+; Function Attrs: nounwind
+define void @v_df_sf(double %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca float, align 4
+ store double %p1, double* %p1.addr, align 8
+ store float %p2, float* %p2.addr, align 4
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_df_sf
+; stel: la $25, v_df_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: jr $25
+; stel: __fn_local_v_df_sf = v_df_sf
+; stel: .end __fn_stub_v_df_sf
+
+; Function Attrs: nounwind
+define void @v_df_df(double %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca double, align 8
+ store double %p1, double* %p1.addr, align 8
+ store double %p2, double* %p2.addr, align 8
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df_df,"ax",@progbits
+; stel: .ent __fn_stub_v_df_df
+; stel: la $25, v_df_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_v_df_df = v_df_df
+; stel: .end __fn_stub_v_df_df
+
+; Function Attrs: nounwind
+define float @sf_v() #0 {
+entry:
+ %0 = load float* @ret_sf, align 4
+ ret float %0
+}
+
+; Function Attrs: nounwind
+define float @sf_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* @ret_sf, align 4
+ ret float %1
+}
+
+
+; stel: .section .mips16.fn.sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf
+; stel: la $25, sf_sf
+; stel: mfc1 $4,$f12
+; stel: jr $25
+; stel: __fn_local_sf_sf = sf_sf
+; stel: .end __fn_stub_sf_sf
+
+
+; Function Attrs: nounwind
+define float @sf_df(double %p) #0 {
+entry:
+ %p.addr = alloca double, align 8
+ store double %p, double* %p.addr, align 8
+ %0 = load double* %p.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* @ret_sf, align 4
+ ret float %1
+}
+
+; stel: .section .mips16.fn.sf_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_df
+; stel: la $25, sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: jr $25
+; stel: __fn_local_sf_df = sf_df
+; stel: .end __fn_stub_sf_df
+
+; Function Attrs: nounwind
+define float @sf_sf_sf(float %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca float, align 4
+ store float %p1, float* %p1.addr, align 4
+ store float %p2, float* %p2.addr, align 4
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf_sf
+; stel: la $25, sf_sf_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f14
+; stel: jr $25
+; stel: __fn_local_sf_sf_sf = sf_sf_sf
+; stel: .end __fn_stub_sf_sf_sf
+
+; Function Attrs: nounwind
+define float @sf_sf_df(float %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca double, align 8
+ store float %p1, float* %p1.addr, align 4
+ store double %p2, double* %p2.addr, align 8
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_sf_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf_df
+; stel: la $25, sf_sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_sf_sf_df = sf_sf_df
+; stel: .end __fn_stub_sf_sf_df
+
+; Function Attrs: nounwind
+define float @sf_df_sf(double %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca float, align 4
+ store double %p1, double* %p1.addr, align 8
+ store float %p2, float* %p2.addr, align 4
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_df_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_df_sf
+; stel: la $25, sf_df_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: jr $25
+; stel: __fn_local_sf_df_sf = sf_df_sf
+; stel: .end __fn_stub_sf_df_sf
+
+; Function Attrs: nounwind
+define float @sf_df_df(double %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca double, align 8
+ store double %p1, double* %p1.addr, align 8
+ store double %p2, double* %p2.addr, align 8
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_df_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_df_df
+; stel: la $25, sf_df_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_sf_df_df = sf_df_df
+; stel: .end __fn_stub_sf_df_df
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }