; RUN: llc -O2 < %s -march=nvptx -mcpu=sm_35 | FileCheck %s --check-prefix=O2 --check-prefix=CHECK
; RUN: llc -O0 < %s -march=nvptx -mcpu=sm_35 | FileCheck %s --check-prefix=O0 --check-prefix=CHECK
; The following IR
;
; quot = n / d
; rem = n % d
;
; should be transformed into
;
; quot = n / d
; rem = n - (n / d) * d
;
; during NVPTX isel, at -O2. At -O0, we should leave it alone.
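;
; As an illustrative sanity check (not part of the test itself), the two forms
; agree on concrete values; e.g. with n = 7 and d = 3:
;
;   quot = 7 / 3           = 2
;   rem  = 7 - (7 / 3) * 3 = 7 - 6 = 1   (same as 7 % 3 = 1)
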
; CHECK-LABEL: sdiv32(
define void @sdiv32(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.s32 [[quot:%r[0-9]+]], [[num:%r[0-9]+]], [[den:%r[0-9]+]];
%quot = sdiv i32 %n, %d
; O0: rem.s32
; (This is unfortunately order-sensitive, even though mul is commutative.)
; O2: mul.lo.s32 [[mul:%r[0-9]+]], [[quot]], [[den]];
; O2: sub.s32 [[rem:%r[0-9]+]], [[num]], [[mul]]
%rem = srem i32 %n, %d
; O2: st{{.*}}[[quot]]
store i32 %quot, i32* %quot_ret
; O2: st{{.*}}[[rem]]
store i32 %rem, i32* %rem_ret
ret void
}

; CHECK-LABEL: udiv32(
define void @udiv32(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.u32 [[quot:%r[0-9]+]], [[num:%r[0-9]+]], [[den:%r[0-9]+]];
%quot = udiv i32 %n, %d
; O0: rem.u32
; Selection DAG doesn't know whether this is signed or unsigned
; multiplication and subtraction, but it makes no difference either way: the
; low 32 bits of the result are the same (see the worked example after this
; function).
; O2: mul.lo.{{u|s}}32 [[mul:%r[0-9]+]], [[quot]], [[den]];
; O2: sub.{{u|s}}32 [[rem:%r[0-9]+]], [[num]], [[mul]]
%rem = urem i32 %n, %d
; O2: st{{.*}}[[quot]]
store i32 %quot, i32* %quot_ret
; O2: st{{.*}}[[rem]]
store i32 %rem, i32* %rem_ret
ret void
}
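
; Illustrative aside for the comment in udiv32 above (not part of the test):
; the low 32 bits of a 32x32-bit product do not depend on whether the operands
; are interpreted as signed or unsigned. For example, multiplying the bit
; pattern 0xFFFFFFFD by 2:
;
;   as signed:   -3 * 2 = -6                                -> 0xFFFFFFFA
;   as unsigned: 0xFFFFFFFD * 2 = 0x1FFFFFFFA, low 32 bits  -> 0xFFFFFFFA
;
; so mul.lo.s32 and mul.lo.u32 (and likewise sub.s32 / sub.u32) yield the same
; result.
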
; Check that we don't perform this optimization if one operation is signed and
; the other isn't.
; CHECK-LABEL: mismatched_types1(
define void @mismatched_types1(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.u32
; CHECK: rem.s32
%quot = udiv i32 %n, %d
%rem = srem i32 %n, %d
store i32 %quot, i32* %quot_ret
store i32 %rem, i32* %rem_ret
ret void
}

; CHECK-LABEL: mismatched_types2(
define void @mismatched_types2(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.s32
; CHECK: rem.u32
%quot = sdiv i32 %n, %d
%rem = urem i32 %n, %d
store i32 %quot, i32* %quot_ret
store i32 %rem, i32* %rem_ret
ret void
}

; Check that we don't perform this optimization if the inputs to the div don't
; match the inputs to the rem.
; CHECK-LABEL: mismatched_inputs1(
define void @mismatched_inputs1(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.s32
; CHECK: rem.s32
%quot = sdiv i32 %n, %d
%rem = srem i32 %d, %n
store i32 %quot, i32* %quot_ret
store i32 %rem, i32* %rem_ret
ret void
}

; CHECK-LABEL: mismatched_inputs2(
define void @mismatched_inputs2(i32 %n1, i32 %n2, i32 %d, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.s32
; CHECK: rem.s32
%quot = sdiv i32 %n1, %d
%rem = srem i32 %n2, %d
store i32 %quot, i32* %quot_ret
store i32 %rem, i32* %rem_ret
ret void
}

; CHECK-LABEL: mismatched_inputs3(
define void @mismatched_inputs3(i32 %n, i32 %d1, i32 %d2, i32* %quot_ret, i32* %rem_ret) {
; CHECK: div.s32
; CHECK: rem.s32
%quot = sdiv i32 %n, %d1
%rem = srem i32 %n, %d2
store i32 %quot, i32* %quot_ret
store i32 %rem, i32* %rem_ret
ret void
}