# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-apple-ios  -run-pass=aarch64-prelegalizer-combiner %s -o - -verify-machineinstrs | FileCheck %s

# Check that chains of G_PTR_ADD instructions with constant offsets are folded into a
# single G_PTR_ADD, including offsets that are only visible through G_TRUNC/G_SEXT.
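# For example, (%p + 4) + 12 is rewritten as %p + 16.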
---
name: ptradd_chain
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $x0

    ; CHECK-LABEL: name: ptradd_chain
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: $x0 = COPY [[PTR_ADD]](p0)
    ; CHECK: RET_ReallyLR implicit $x0
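    ; Two G_PTR_ADDs with constant offsets 4 and 12 fold into one G_PTR_ADD with offset 16.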
    %0:_(p0) = COPY $x0
    %1:_(s64) = G_CONSTANT i64 4
    %2:_(s64) = G_CONSTANT i64 12
    %3:_(p0) = G_PTR_ADD %0(p0), %1
    %4:_(p0) = G_PTR_ADD %3(p0), %2
    $x0 = COPY %4(p0)
    RET_ReallyLR implicit $x0
...
---
name: ptradd_chain_2
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $x0
    ; CHECK-LABEL: name: ptradd_chain_2
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: $x0 = COPY [[PTR_ADD]](p0)
    ; CHECK: RET_ReallyLR implicit $x0
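    ; Three chained G_PTR_ADDs with constant offsets 4, 12, and 12 fold into one G_PTR_ADD with offset 28.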
    %0:_(p0) = COPY $x0
    %1:_(s64) = G_CONSTANT i64 4
    %2:_(s64) = G_CONSTANT i64 12
    %3:_(p0) = G_PTR_ADD %0(p0), %1
    %4:_(p0) = G_PTR_ADD %3(p0), %2
    %5:_(p0) = G_PTR_ADD %4(p0), %2
    $x0 = COPY %5(p0)
    RET_ReallyLR implicit $x0
...
---
name: ptradd_chain_lookthrough
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $x0
    ; CHECK-LABEL: name: ptradd_chain_lookthrough
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: $x0 = COPY [[PTR_ADD]](p0)
    ; CHECK: RET_ReallyLR implicit $x0
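    ; The third offset is the constant 12 seen only through G_TRUNC/G_SEXT; the combiner
    ; looks through those and folds 4 + 12 + 12 = 28.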
    %0:_(p0) = COPY $x0
    %1:_(s64) = G_CONSTANT i64 4
    %2:_(s64) = G_CONSTANT i64 12
    %6:_(s32) = G_TRUNC %2(s64)
    %7:_(s64) = G_SEXT %6(s32)
    %3:_(p0) = G_PTR_ADD %0(p0), %1
    %4:_(p0) = G_PTR_ADD %3(p0), %2
    %5:_(p0) = G_PTR_ADD %4(p0), %7
    $x0 = COPY %5(p0)
    RET_ReallyLR implicit $x0
...