# prelegalizercombiner-binop-same-val.mir
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
# (G_OR x, x) --> x: with both operands being the same vreg, the combiner
# deletes the G_OR and rewrites the user to read %copy directly.
name: or_same
tracksRegLiveness: true
body: |
bb.0:
liveins: $x0
; Fold: x or x -> x
; CHECK-LABEL: name: or_same
; CHECK: liveins: $x0
; CHECK: %copy:_(s64) = COPY $x0
; CHECK: $x0 = COPY %copy(s64)
; CHECK: RET_ReallyLR implicit $x0
%copy:_(s64) = COPY $x0
%or:_(s64) = G_OR %copy, %copy
$x0 = COPY %or(s64)
RET_ReallyLR implicit $x0
...
---
# (G_AND x, x) --> x: same fold as the G_OR case, with both operands the
# identical vreg; the G_AND is removed and %copy is used directly.
name: and_same
tracksRegLiveness: true
body: |
bb.0:
liveins: $x0
; Fold: x and x -> x
; CHECK-LABEL: name: and_same
; CHECK: liveins: $x0
; CHECK: %copy:_(s64) = COPY $x0
; CHECK: $x0 = COPY %copy(s64)
; CHECK: RET_ReallyLR implicit $x0
%copy:_(s64) = COPY $x0
%and:_(s64) = G_AND %copy, %copy
$x0 = COPY %and(s64)
RET_ReallyLR implicit $x0
...
---
# The fold also fires when the two operands are distinct vregs that carry the
# same value: %same_as_or is a plain COPY of %or, so G_AND %or, %same_as_or
# still folds to %or (the expected output keeps the G_OR but drops the G_AND).
name: and_same2
tracksRegLiveness: true
body: |
bb.0:
liveins: $x0, $x1
; We can fold when the LHS and RHS are guaranteed to be identical.
; CHECK-LABEL: name: and_same2
; CHECK: liveins: $x0, $x1
; CHECK: %copy1:_(s64) = COPY $x0
; CHECK: %copy2:_(s64) = COPY $x1
; CHECK: %or:_(s64) = G_OR %copy1, %copy2
; CHECK: $x0 = COPY %or(s64)
; CHECK: RET_ReallyLR implicit $x0
%copy1:_(s64) = COPY $x0
%copy2:_(s64) = COPY $x1
%or:_(s64) = G_OR %copy1, %copy2
%same_as_or:_(s64) = COPY %or(s64)
%and:_(s64) = G_AND %or, %same_as_or
$x0 = COPY %and(s64)
RET_ReallyLR implicit $x0
...
---
# Negative test: %or1 and %or2 are built from different source registers
# ($x1 vs $x2), so no operand pair is provably identical and every
# instruction must survive unchanged in the expected output.
name: or_and_not_same
tracksRegLiveness: true
body: |
bb.0:
liveins: $x0, $x1, $x2
; None of the G_ORs or G_ANDs should be eliminated here, because their LHS
; and RHS values are different.
; CHECK-LABEL: name: or_and_not_same
; CHECK: liveins: $x0, $x1, $x2
; CHECK: %copy1:_(s64) = COPY $x0
; CHECK: %copy2:_(s64) = COPY $x1
; CHECK: %copy3:_(s64) = COPY $x2
; CHECK: %or1:_(s64) = G_OR %copy1, %copy2
; CHECK: %or2:_(s64) = G_OR %copy1, %copy3
; CHECK: %and:_(s64) = G_AND %or1, %or2
; CHECK: $x0 = COPY %and(s64)
; CHECK: RET_ReallyLR implicit $x0
%copy1:_(s64) = COPY $x0
%copy2:_(s64) = COPY $x1
%copy3:_(s64) = COPY $x2
%or1:_(s64) = G_OR %copy1, %copy2
%or2:_(s64) = G_OR %copy1, %copy3
%and:_(s64) = G_AND %or1, %or2
$x0 = COPY %and(s64)
RET_ReallyLR implicit $x0
...