# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
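# This file checks legalization of G_ADD on AArch64: wide scalars are
# narrowed, small scalars are widened, oversized vectors are split, and
# natively-sized vectors are left unchanged.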
---
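# An s128 G_ADD is narrowed to the 64-bit native size: the low halves are
# added with G_UADDO and the carry feeds a G_UADDE on the high halves.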
name:            test_scalar_add_big
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_scalar_add_big
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
    ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDO1]]
    ; CHECK: $x0 = COPY [[UADDO]](s64)
    ; CHECK: $x1 = COPY [[UADDE]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64) = COPY $x2
    %3:_(s64) = COPY $x3
    %4:_(s128) = G_MERGE_VALUES %0(s64), %1(s64)
    %5:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
    %6:_(s128) = G_ADD %4, %5
    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
    $x0 = COPY %7(s64)
    $x1 = COPY %8(s64)

...
---
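# An s192 G_ADD narrows to three 64-bit parts: a G_UADDO on the lowest
# parts followed by a G_UADDE chain that propagates the carry upward.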
name:            test_scalar_add_big_nonpow2
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_scalar_add_big_nonpow2
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
    ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDO1]]
    ; CHECK: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY2]], [[COPY3]], [[UADDE1]]
    ; CHECK: $x0 = COPY [[UADDO]](s64)
    ; CHECK: $x1 = COPY [[UADDE]](s64)
    ; CHECK: $x2 = COPY [[UADDE2]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64) = COPY $x2
    %3:_(s64) = COPY $x3
    %4:_(s192) = G_MERGE_VALUES %0(s64), %1(s64), %2(s64)
    %5:_(s192) = G_MERGE_VALUES %1(s64), %2(s64), %3(s64)
    %6:_(s192) = G_ADD %4, %5
    %7:_(s64), %8:_(s64), %9:_(s64) = G_UNMERGE_VALUES %6(s192)
    $x0 = COPY %7(s64)
    $x1 = COPY %8(s64)
    $x2 = COPY %9(s64)

...
---
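# An s8 G_ADD is too small for AArch64 and is widened to s32: the operands
# are truncated to s32, added, and the result extended back with G_ANYEXT.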
name:            test_scalar_add_small
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_scalar_add_small
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s8) = G_TRUNC %0(s64)
    %3:_(s8) = G_TRUNC %1(s64)
    %4:_(s8) = G_ADD %2, %3
    %5:_(s64) = G_ANYEXT %4(s8)
    $x0 = COPY %5(s64)

...
---
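# A <4 x s64> G_ADD exceeds the 128-bit vector registers and is split into
# two <2 x s64> adds.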
name:            test_vector_add
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_vector_add
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
    ; CHECK: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY2]]
    ; CHECK: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY1]], [[COPY3]]
    ; CHECK: $q0 = COPY [[ADD]](<2 x s64>)
    ; CHECK: $q1 = COPY [[ADD1]](<2 x s64>)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = COPY $q2
    %3:_(<2 x s64>) = COPY $q3
    %4:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
    %5:_(<4 x s64>) = G_CONCAT_VECTORS %2, %3
    %6:_(<4 x s64>) = G_ADD %4, %5
    %7:_(<2 x s64>), %8:_(<2 x s64>) = G_UNMERGE_VALUES %6(<4 x s64>)
    $q0 = COPY %7(<2 x s64>)
    $q1 = COPY %8(<2 x s64>)

...
---
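# A <6 x s64> G_ADD is split into three <2 x s64> adds, one per 128-bit
# chunk of the concatenated operands.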
name:            test_vector_add_nonpow2
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_vector_add_nonpow2
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
    ; CHECK: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY1]], [[COPY2]]
    ; CHECK: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY2]], [[COPY3]]
    ; CHECK: $q0 = COPY [[ADD]](<2 x s64>)
    ; CHECK: $q1 = COPY [[ADD1]](<2 x s64>)
    ; CHECK: $q2 = COPY [[ADD2]](<2 x s64>)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = COPY $q2
    %3:_(<2 x s64>) = COPY $q3
    %4:_(<6 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>), %2(<2 x s64>)
    %5:_(<6 x s64>) = G_CONCAT_VECTORS %1(<2 x s64>), %2(<2 x s64>), %3(<2 x s64>)
    %6:_(<6 x s64>) = G_ADD %4, %5
    %7:_(<2 x s64>), %8:_(<2 x s64>), %9:_(<2 x s64>) = G_UNMERGE_VALUES %6(<6 x s64>)
    $q0 = COPY %7(<2 x s64>)
    $q1 = COPY %8(<2 x s64>)
    $q2 = COPY %9(<2 x s64>)

...
---
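# <8 x s16> fits in a single 128-bit register, so the G_ADD is already
# legal and passes through unchanged.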
name:            add_v8i16
alignment:       4
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: add_v8i16
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
    ; CHECK: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[ADD]](<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(<8 x s16>) = COPY $q1
    %2:_(<8 x s16>) = G_ADD %0, %1
    $q0 = COPY %2(<8 x s16>)
    RET_ReallyLR implicit $q0

...
---
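# <16 x s8> is likewise already legal; no legalization is expected.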
name:            add_v16i8
alignment:       4
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: add_v16i8
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[ADD]](<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %2:_(<16 x s8>) = G_ADD %0, %1
    $q0 = COPY %2(<16 x s8>)
    RET_ReallyLR implicit $q0

...