; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
target triple = "thumbv7-apple-ios"

; CHECK-LABEL: local_split:
;
; The load must go into d0-d15, which are all clobbered by the asm.
; RAGreedy should split the range and use d16-d31 to avoid a spill.
;
; CHECK: vldr s
; CHECK-NOT: vstr
; CHECK: vadd.f32
; CHECK-NOT: vstr
; CHECK: vorr
; CHECK: vstr s
define void @local_split(float* nocapture %p) nounwind ssp {
entry:
  %x = load float, float* %p, align 4
  %a = fadd float %x, 1.0
  tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
  store float %a, float* %p, align 4
  ret void
}

; CHECK-LABEL: global_split:
;
; Same thing, but across basic blocks.
;
; CHECK: vldr s
; CHECK-NOT: vstr
; CHECK: vadd.f32
; CHECK-NOT: vstr
; CHECK: vorr
; CHECK: vstr s
define void @global_split(float* nocapture %p1, float* nocapture %p2) nounwind ssp {
entry:
  %0 = load float, float* %p1, align 4
  %add = fadd float %0, 1.000000e+00
  tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
  %cmp = fcmp ogt float %add, 0.000000e+00
  br i1 %cmp, label %if.then, label %if.end

if.then:
  store float %add, float* %p2, align 4
  br label %if.end

if.end:
  store float %add, float* %p1, align 4
  ret void
}