// RUN: mlir-opt %s -test-linalg-transform-patterns | FileCheck %s
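// This file exercises the patterns registered by -test-linalg-transform-patterns:
// tiling of linalg ops into loop.for nests, fusion of tiled producers into
// consumers, vectorization of a marked matmul-shaped linalg.generic, tiling
// with a permuted loop order, and promotion of std.subview operands to local
// buffers. The expected output is described by the CHECK lines after each
// function.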
// CHECK-DAG: #[[STRIDED_1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK-DAG: #[[mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
// CHECK-DAG: #[[nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
// CHECK-DAG: #[[km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
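// @dot, @matvec and @matmul below are tiled by the test pass; the tile sizes
// show up as the step constants of the generated loop.for nests in the CHECK
// lines (8000/8/1 for dot, 5/6 for matvec, and the successive levels
// 2000/3000/4000, 200/300/400, 20/30/40 and 2/3/4 for matmul).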
func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
          %y: memref<?xf32, offset: ?, strides: [1]>,
          %v: memref<f32>) {
  linalg.dot(%x, %y, %v) : memref<?xf32, offset: ?, strides: [1]>,
                           memref<?xf32, offset: ?, strides: [1]>,
                           memref<f32>
  return
}
// CHECK-LABEL: func @dot
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c1:.*]] = constant 1 : index
// CHECK-DAG : %[[c8:.*]] = constant 8 : index
// CHECK-DAG : %[[c8000:.*]] = constant 8000 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c8000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c8]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c1]] {
// CHECK : load
// CHECK : load
// CHECK : mulf
// CHECK : load
// CHECK : addf
// CHECK : store
func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
             %x: memref<?xf32, offset: ?, strides: [1]>,
             %y: memref<?xf32, offset: ?, strides: [1]>) {
  linalg.matvec(%A, %x, %y) : memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?xf32, offset: ?, strides: [1]>,
                              memref<?xf32, offset: ?, strides: [1]>
  return
}
// CHECK-LABEL: func @matvec
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c5:.*]] = constant 5 : index
// CHECK-DAG : %[[c6:.*]] = constant 6 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
// CHECK : linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
             %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
             %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  linalg.matmul(%A, %B, %C) : memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>
  return
}
// CHECK-LABEL: func @matmul
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c2:.*]] = constant 2 : index
// CHECK-DAG : %[[c3:.*]] = constant 3 : index
// CHECK-DAG : %[[c4:.*]] = constant 4 : index
// CHECK-DAG : %[[c20:.*]] = constant 20 : index
// CHECK-DAG : %[[c30:.*]] = constant 30 : index
// CHECK-DAG : %[[c40:.*]] = constant 40 : index
// CHECK-DAG : %[[c200:.*]] = constant 200 : index
// CHECK-DAG : %[[c300:.*]] = constant 300 : index
// CHECK-DAG : %[[c400:.*]] = constant 400 : index
// CHECK-DAG : %[[c2000:.*]] = constant 2000 : index
// CHECK-DAG : %[[c3000:.*]] = constant 3000 : index
// CHECK-DAG : %[[c4000:.*]] = constant 4000 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c200]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c300]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c400]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
// CHECK : linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
#some_generic_trait = {
  args_in = 1,
  args_out = 1,
  indexing_maps = [
    affine_map<(i, j) -> (i, j)>,
    affine_map<(i, j) -> (i, j)>
  ],
  iterator_types = ["parallel", "parallel"]
}
func @fusion_test(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %C: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %D: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %E: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  // This should not be fused as it would violate dependencies. It will get
  // tiled for all levels of the memory hierarchy.
  linalg.matmul(%A, %A, %C) : memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>
  // This should be fused.
  linalg.matmul(%A, %B, %C) : memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>
  // This should not be fused or transformed at all since there are no patterns
  // matching it. However, it will be reordered because there are no
  // dependencies preventing that.
  linalg.generic #some_generic_trait %A, %D {
    ^bb(%a: f32, %b: f32) :
      linalg.yield %a : f32
  } : memref<?x?xf32, offset: ?, strides: [?, 1]>,
      memref<?x?xf32, offset: ?, strides: [?, 1]>
  linalg.matmul(%C, %D, %E) : memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              memref<?x?xf32, offset: ?, strides: [?, 1]>
  return
}
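// In the CHECK lines below, the non-fusable matmul is tiled on its own through
// all levels, the linalg.generic is left untouched, and the fusable matmul
// appears together with its consumer inside a shared %c100/%c150 loop nest.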
// CHECK-LABEL: func @fusion_test
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c2:.*]] = constant 2 : index
// CHECK-DAG : %[[c3:.*]] = constant 3 : index
// CHECK-DAG : %[[c4:.*]] = constant 4 : index
// CHECK-DAG : %[[c20:.*]] = constant 20 : index
// CHECK-DAG : %[[c30:.*]] = constant 30 : index
// CHECK-DAG : %[[c40:.*]] = constant 40 : index
// CHECK-DAG : %[[c100:.*]] = constant 100 : index
// CHECK-DAG : %[[c150:.*]] = constant 150 : index
// CHECK-DAG : %[[c200:.*]] = constant 200 : index
// CHECK-DAG : %[[c300:.*]] = constant 300 : index
// CHECK-DAG : %[[c400:.*]] = constant 400 : index
// CHECK-DAG : %[[c2000:.*]] = constant 2000 : index
// CHECK-DAG : %[[c3000:.*]] = constant 3000 : index
// CHECK-DAG : %[[c4000:.*]] = constant 4000 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c200]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c300]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c400]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
// CHECK : linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
//
// CHECK : linalg.generic
//
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c100]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c150]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c2]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c3]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c4]] {
// CHECK : linalg.matmul(%{{.*}}, %{{.*}}, %{{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c2]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c3]] {
// CHECK : loop.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c4]] {
// CHECK : linalg.matmul(%{{.*}}, %{{.*}}, %{{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
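// The __internal_linalg_transform__ = "_marked_matmul_" marker below selects
// the generic op in @vectorization_test for the vectorization pattern: the
// CHECK lines expect whole-memref vector loads, a single vector.contract, and
// a whole-memref store of the result.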
#matmul_trait = {
  args_in = 2,
  args_out = 1,
  indexing_maps = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (m, n)>
  ],
  iterator_types = ["parallel", "parallel", "reduction"],
  __internal_linalg_transform__ = "_marked_matmul_"
}
func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                         %C: memref<8x32xf32>) {
  linalg.generic #matmul_trait %A, %B, %C {
    ^bb(%a: f32, %b: f32, %c: f32) :
      %d = mulf %a, %b: f32
      %e = addf %c, %d: f32
      linalg.yield %e : f32
  } : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32>
  return
}
// CHECK-LABEL: func @vectorization_test
// CHECK: vector.type_cast %{{.*}} : memref<8x16xf32> to memref<vector<8x16xf32>>
// CHECK: load %{{.*}}[] : memref<vector<8x16xf32>>
// CHECK: vector.type_cast %{{.*}} : memref<16x32xf32> to memref<vector<16x32xf32>>
// CHECK: load %{{.*}}[] : memref<vector<16x32xf32>>
// CHECK: vector.type_cast %{{.*}} : memref<8x32xf32> to memref<vector<8x32xf32>>
// CHECK: load %{{.*}}[] : memref<vector<8x32xf32>>
// CHECK: vector.contract {indexing_maps = [#[[mk]], #[[kn]], #[[mn]]], iterator_types = ["parallel", "parallel", "reduction"]} %{{.*}}, %{{.*}}, %{{.*}} : vector<8x16xf32>, vector<16x32xf32> into vector<8x32xf32>
// CHECK: store %{{.*}}, %{{.*}}[] : memref<vector<8x32xf32>>
func @fma(%a: f32, %b: f32, %c: f32) -> f32 {
  %d = mulf %a, %b: f32
  %e = addf %c, %d: f32
  return %e: f32
}
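// #generic_matmul_trait below plugs the scalar @fma function in as the op body
// (fun = @fma). The test pass interchanges the iterators of the generic op, so
// the CHECK line for @permute_generic expects the indexing maps and
// iterator_types in permuted order.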
#matmul_accesses = [
  affine_map<(m, n, k) -> (m, k)>,
  affine_map<(m, n, k) -> (k, n)>,
  affine_map<(m, n, k) -> (m, n)>
]
#generic_matmul_trait = {
  args_in = 2,
  args_out = 1,
  fun = @fma,
  indexing_maps = #matmul_accesses,
  library_call = "linalg_matmul",
  iterator_types = ["parallel", "parallel", "reduction"]
}
func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                      %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                      %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  linalg.generic #generic_matmul_trait %A, %B, %C :
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>
  return
}
// CHECK-LABEL : func @fma
// CHECK-LABEL : func @permute_generic
// CHECK : linalg.generic {args_in = 2, args_out = 1, fun = @fma, indexing_maps = [#[[kn]], #[[nm]], #[[km]]], iterator_types = ["parallel", "reduction", "parallel"], library_call = "linalg_matmul"} %{{.*}}, %{{.*}}, %{{.*}} : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
func @fma_indexed(%i: index, %j: index, %k: index, %a: f32, %b: f32, %c: f32) -> f32 {
  %d = mulf %a, %b: f32
  %e = addf %c, %d: f32
  return %e: f32
}
#indexed_matmul_trait = {
  args_in = 2,
  args_out = 1,
  fun = @fma_indexed,
  indexing_maps = #matmul_accesses,
  library_call = "linalg_matmul_indexed",
  iterator_types = ["parallel", "parallel", "reduction"]
}
func @permute_generic_indexed(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  linalg.indexed_generic #indexed_matmul_trait %A, %B, %C :
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>
  return
}
// CHECK-LABEL : func @fma_indexed
// CHECK-LABEL : func @permute_generic_indexed
// CHECK : linalg.indexed_generic {args_in = 2, args_out = 1, fun = @fma_indexed, indexing_maps = [#[[kn]], #[[nm]], #[[km]]], iterator_types = ["parallel", "reduction", "parallel"], library_call = "linalg_matmul_indexed"} %{{.*}}, %{{.*}}, %{{.*}} : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
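// The "__with_perm__" marker on the ops below requests tiling with an
// interchanged loop order; compare the order of the step constants in the
// CHECK lines of @matvec_perm and @matmul_perm with the unpermuted versions
// above.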
func @dot_perm(%x: memref<?xf32, offset: ?, strides: [1]>,
               %y: memref<?xf32, offset: ?, strides: [1]>,
               %v: memref<f32>) {
  linalg.dot(%x, %y, %v) {__internal_linalg_transform__ = "__with_perm__"} :
    memref<?xf32, offset: ?, strides: [1]>,
    memref<?xf32, offset: ?, strides: [1]>,
    memref<f32>
  return
}
// CHECK-LABEL: func @dot_perm
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c8:.*]] = constant 8 : index
// CHECK-DAG : %[[c8000:.*]] = constant 8000 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c8000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c8]] {
// CHECK : linalg.dot({{.*}}, {{.*}}, {{.*}}) : memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<f32>
func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %x: memref<?xf32, offset: ?, strides: [1]>,
                  %y: memref<?xf32, offset: ?, strides: [1]>) {
  linalg.matvec(%A, %x, %y) {__internal_linalg_transform__ = "__with_perm__"} :
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?xf32, offset: ?, strides: [1]>,
    memref<?xf32, offset: ?, strides: [1]>
  return
}
// CHECK-LABEL: func @matvec_perm
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c5:.*]] = constant 5 : index
// CHECK-DAG : %[[c6:.*]] = constant 6 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
// CHECK : linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                  %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  linalg.matmul(%A, %B, %C) {__internal_linalg_transform__ = "__with_perm__"} :
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>,
    memref<?x?xf32, offset: ?, strides: [?, 1]>
  return
}
// CHECK-LABEL: func @matmul_perm
// CHECK-DAG : %[[c0:.*]] = constant 0 : index
// CHECK-DAG : %[[c2:.*]] = constant 2 : index
// CHECK-DAG : %[[c3:.*]] = constant 3 : index
// CHECK-DAG : %[[c4:.*]] = constant 4 : index
// CHECK-DAG : %[[c20:.*]] = constant 20 : index
// CHECK-DAG : %[[c30:.*]] = constant 30 : index
// CHECK-DAG : %[[c40:.*]] = constant 40 : index
// CHECK-DAG : %[[c200:.*]] = constant 200 : index
// CHECK-DAG : %[[c300:.*]] = constant 300 : index
// CHECK-DAG : %[[c400:.*]] = constant 400 : index
// CHECK-DAG : %[[c2000:.*]] = constant 2000 : index
// CHECK-DAG : %[[c3000:.*]] = constant 3000 : index
// CHECK-DAG : %[[c4000:.*]] = constant 4000 : index
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c300]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c200]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c400]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
// CHECK : linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
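// @promote_subview_matmul starts from a manually tiled loop nest over
// std.subview slices; the "_promote_views_" marker asks the test pass to
// promote those subviews into local buffers (alloc + std.view + linalg.slice),
// copy the data in, and run linalg.matmul on the promoted views, as the CHECK
// lines expect.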
func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                             %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                             %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
  %c2000 = constant 2000 : index
  %c3000 = constant 3000 : index
  %c4000 = constant 4000 : index
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %0 = dim %arg0, 0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
  %1 = dim %arg0, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
  %2 = dim %arg1, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
  loop.for %arg3 = %c0 to %0 step %c2000 {
    loop.for %arg4 = %c0 to %2 step %c3000 {
      loop.for %arg5 = %c0 to %1 step %c4000 {
        %3 = std.subview %arg0[%arg3, %arg5][%c2000, %c4000][%c1, %c1] :
          memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
        %4 = std.subview %arg1[%arg5, %arg4][%c4000, %c3000][%c1, %c1] :
          memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
        %5 = std.subview %arg2[%arg3, %arg4][%c2000, %c3000][%c1, %c1] :
          memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
        linalg.matmul(%3, %4, %5) {__internal_linalg_transform__ = "_promote_views_"} :
          memref<?x?xf32, offset: ?, strides: [?, ?]>,
          memref<?x?xf32, offset: ?, strides: [?, ?]>,
          memref<?x?xf32, offset: ?, strides: [?, ?]>
      }
    }
  }
  return
}
// CHECK-LABEL: func @promote_subview_matmul
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
// CHECK : %[[s0:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK : %[[s1:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK : %[[s2:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK : %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK : %[[v0:.*]] = std.view %[[a0]][][{{%.*}}, {{%.*}}]: memref<?xi8> to memref<?x?xf32>
// CHECK : %[[l0:.*]] = linalg.slice %[[v0]][{{%.*}}, {{%.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #map{{.*}}>
// CHECK : %[[a1:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK : %[[v1:.*]] = std.view %[[a1]][][{{%.*}}, {{%.*}}]: memref<?xi8> to memref<?x?xf32>
// CHECK : %[[l1:.*]] = linalg.slice %[[v1]][{{%.*}}, {{%.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #map{{.*}}>
// CHECK : %[[a2:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK : %[[v2:.*]] = std.view %[[a2]][][{{%.*}}, {{%.*}}]: memref<?xi8> to memref<?x?xf32>
// CHECK : %[[l2:.*]] = linalg.slice %[[v2]][{{%.*}}, {{%.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #map{{.*}}>
// CHECK : linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK : linalg.copy(%[[s1]], %[[l1]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK : linalg.copy(%[[s2]], %[[l2]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK : linalg.matmul(%[[v0]], %[[v1]], %[[v2]]) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>