; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

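; A fixed-size alloca accessed through a cast to a scalable vector type. The
; size of <vscale x 4 x i32> is unknown at compile time, so the checks expect
; instcombine to leave the [16 x i32] alloca and the bitcast untouched.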
define void @fixed_array16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [16 x i32]* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  %cast = bitcast [16 x i32]* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

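; The reverse direction: a scalable alloca accessed as a fixed-length vector.
; The checks expect the <vscale x 4 x i32> alloca to be kept, with only its
; alignment raised to 64, and the bitcast to remain in place.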
define void @scalable4i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 4 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  %cast = bitcast <vscale x 4 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

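; As in the first test, but the fixed-size allocation is a vector rather than
; an array; the <16 x i32> alloca and its cast to a scalable pointer should
; likewise survive unchanged.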
define void @fixed16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <16 x i32>* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  %cast = bitcast <16 x i32>* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

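; A scalable alloca whose minimum size (64 bytes at vscale=1) matches the
; fixed <16 x i32> access type; the checks still expect the alloca to stay
; scalable, again with its alignment raised to 64.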
define void @scalable16i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 16 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  %cast = bitcast <vscale x 16 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

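; Both types are scalable but differ in size (<vscale x 32 x i32> is twice as
; wide as <vscale x 16 x i32>), so the larger alloca and the bitcast should
; both be preserved.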
define void @scalable32i32_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i32>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  %cast = bitcast <vscale x 32 x i32>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

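; <vscale x 32 x i16> and <vscale x 16 x i32> have identical sizes for every
; vscale, so here the checks expect instcombine to retype the alloca to the
; accessed type and fold the bitcast away completely.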
define void @scalable32i16_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

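; Same type pair as above, but the alloca is also accessed through its
; original <vscale x 32 x i16> type; the extra user means the alloca keeps
; its type and the bitcast must stay.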
define void @scalable32i16_to_scalable16i32_multiuse(<vscale x 16 x i32>* %out, <vscale x 32 x i16>* %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i16>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], <vscale x 32 x i16>* [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* %tmp, align 16
  store <vscale x 32 x i16> %reload2, <vscale x 32 x i16>* %out2, align 16
  ret void
}