; non-integral-pointers.ll
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
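; The "ni:4" component of the datalayout below marks address space 4 as
; non-integral, so transforms must not introduce ptrtoint/inttoptr casts for
; pointers in that address space.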
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
target triple = "x86_64-unknown-linux-gnu"
define i8 addrspace(4)* @f_0() {
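; The gep from null is kept as a getelementptr constant expression; it must not
; be folded to inttoptr, since addrspace(4) is non-integral (contrast with @f_1).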
; CHECK-LABEL: @f_0(
; CHECK-NEXT: ret i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* null, i64 50)
;
  %result = getelementptr i8, i8 addrspace(4)* null, i64 50
  ret i8 addrspace(4)* %result
}
define i8 addrspace(3)* @f_1() {
; inttoptr is fine here since addrspace(3) is integral.
; CHECK-LABEL: @f_1(
; CHECK-NEXT: ret i8 addrspace(3)* inttoptr (i64 50 to i8 addrspace(3)*)
;
  %result = getelementptr i8, i8 addrspace(3)* null, i64 50
  ret i8 addrspace(3)* %result
}
define void @f_2(i8 addrspace(4)** %ptr0, i8 addrspace(4)** %ptr1) {
; It is not okay to convert the load/store pair to load and store
; integers, since pointers in address space 4 are non-integral.
; CHECK-LABEL: @f_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAL:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[PTR0:%.*]], align 8
; CHECK-NEXT: store i8 addrspace(4)* [[VAL]], i8 addrspace(4)** [[PTR1:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
  %val = load i8 addrspace(4)*, i8 addrspace(4)** %ptr0
  store i8 addrspace(4)* %val, i8 addrspace(4)** %ptr1
  ret void
}
define void @f_3(i8 addrspace(3)** %ptr0, i8 addrspace(3)** %ptr1) {
; It *is* okay to convert the load/store pair to load and store
; integers, since pointers in address space 3 are integral.
; CHECK-LABEL: @f_3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8 addrspace(3)** [[PTR0:%.*]] to i64*
; CHECK-NEXT: [[VAL1:%.*]] = load i64, i64* [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 addrspace(3)** [[PTR1:%.*]] to i64*
; CHECK-NEXT: store i64 [[VAL1]], i64* [[TMP1]], align 8
; CHECK-NEXT: ret void
;
entry:
  %val = load i8 addrspace(3)*, i8 addrspace(3)** %ptr0
  store i8 addrspace(3)* %val, i8 addrspace(3)** %ptr1
  ret void
}
define i64 @g(i8 addrspace(4)** %gp) {
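; The stored value is a non-integral pointer, so the store must stay
; pointer-typed (no ptrtoint) even though the same location is reloaded as an
; i64 afterwards.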
; CHECK-LABEL: @g(
; CHECK-NEXT: [[DOTPRE:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[GP:%.*]], align 8
; CHECK-NEXT: [[V74:%.*]] = call i8 addrspace(4)* @alloc()
; CHECK-NEXT: [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i8 addrspace(4)* addrspace(4)*
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* addrspace(4)* [[TMP1]] to i8 addrspace(4)**
; CHECK-NEXT: store i8 addrspace(4)* [[DOTPRE]], i8 addrspace(4)** [[TMP2]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i64 addrspace(4)*
; CHECK-NEXT: [[V80:%.*]] = addrspacecast i64 addrspace(4)* [[TMP3]] to i64*
; CHECK-NEXT: [[V81:%.*]] = load i64, i64* [[V80]], align 8
; CHECK-NEXT: ret i64 [[V81]]
;
  %.pre = load i8 addrspace(4)*, i8 addrspace(4)** %gp, align 8
  %v74 = call i8 addrspace(4)* @alloc()
  %v75 = addrspacecast i8 addrspace(4)* %v74 to i8*
  %v76 = bitcast i8* %v75 to i8 addrspace(4)**
  %v77 = getelementptr i8 addrspace(4)*, i8 addrspace(4)** %v76, i64 -1
  store i8 addrspace(4)* %.pre, i8 addrspace(4)** %v77, align 8
  %v80 = bitcast i8 addrspace(4)** %v77 to i64*
  %v81 = load i64, i64* %v80, align 8
  ret i64 %v81
}
define i64 @g2(i8* addrspace(4)* %gp) {
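; Here the value being copied is an integral (address space 0) pointer; only
; the memory is addressed through non-integral pointers, so rewriting the copy
; as an i64 load/store and forwarding the loaded value is fine.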
; CHECK-LABEL: @g2(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* addrspace(4)* [[GP:%.*]] to i64 addrspace(4)*
; CHECK-NEXT: [[DOTPRE1:%.*]] = load i64, i64 addrspace(4)* [[TMP1]], align 8
; CHECK-NEXT: [[V74:%.*]] = call i8 addrspace(4)* @alloc()
; CHECK-NEXT: [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i64 addrspace(4)*
; CHECK-NEXT: store i64 [[DOTPRE1]], i64 addrspace(4)* [[TMP2]], align 8
; CHECK-NEXT: ret i64 [[DOTPRE1]]
;
  %.pre = load i8*, i8* addrspace(4)* %gp, align 8
  %v74 = call i8 addrspace(4)* @alloc()
  %v76 = bitcast i8 addrspace(4)* %v74 to i8* addrspace(4)*
  %v77 = getelementptr i8*, i8* addrspace(4)* %v76, i64 -1
  store i8* %.pre, i8* addrspace(4)* %v77, align 8
  %v80 = bitcast i8* addrspace(4)* %v77 to i64 addrspace(4)*
  %v81 = load i64, i64 addrspace(4)* %v80, align 8
  ret i64 %v81
}
declare i8 addrspace(4)* @alloc()
define i64 @f_4(i8 addrspace(4)* %v0) {
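; InstCombine would normally fold a call through a bitcasted function pointer
; by casting the arguments, but that would require a ptrtoint of the
; non-integral pointer argument, so the bitcast call is left in place.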
; CHECK-LABEL: @f_4(
; CHECK-NEXT: [[V6:%.*]] = call i64 bitcast (i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*)(i8 addrspace(4)* [[V0:%.*]])
; CHECK-NEXT: ret i64 [[V6]]
;
  %v5 = bitcast i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*
  %v6 = call i64 %v5(i8 addrspace(4)* %v0)
  ret i64 %v6
}
declare i64 @f_5(i64)