Changes from 2 commits
4 changes: 4 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -20241,6 +20241,10 @@ static SDValue trySimplifySrlAddToRshrnb(SDValue Srl, SelectionDAG &DAG,
    return SDValue();
  unsigned ShiftValue = SrlOp1->getZExtValue();

  uint64_t EltSize = ResVT.getScalarSizeInBits();
  if (ShiftValue > EltSize)
Contributor:
I discussed this with @paulwalker-arm offline and we don't think this is right after all. I think we should just bail out of the optimisation if the ShiftValue exceeds the truncated element width. Also, it would be good to at least add an assert here that the shift value is non-zero. From what you're saying it should never happen, which makes sense.

Contributor Author:
Can you please explain why you both came to the conclusion that this isn't correct?

Collaborator:
Sorry Matt, this was my fault. My question arose from the typical behaviour of right shifts whose shift amount is greater than the element bit length, whereby i32 >> #32+N == i32 >> #32.

Dave pointed out that in this case the element type in question is actually half-sized, because the combine is only expected to be called when a truncate is in play. That would imply i32 >> #16+N == i32 >> #16, which is clearly bogus.
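
To make the counter-example concrete, here is a tiny standalone C++ check (values chosen purely for illustration, not taken from the patch) showing that truncating after the real shift differs from truncating after a shift clamped to the narrowed 16-bit width:

#include <cassert>
#include <cstdint>

int main() {
  // An i32 element that will be narrowed to i16, as in the rshrnb combine.
  uint32_t X = 0x00100000; // only bit 20 set
  // Real shift of 20, then truncate: 0x0001.
  uint16_t Real = static_cast<uint16_t>(X >> 20);
  // Shift clamped to the narrowed element width (16), then truncate: 0x0010.
  uint16_t Clamped = static_cast<uint16_t>(X >> 16);
  assert(Real == 0x0001 && Clamped == 0x0010 && Real != Clamped);
  return 0;
}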

Contributor Author:
Thanks. I've now just gone with

  if (ShiftValue < 1 || ShiftValue > ResVT.getScalarSizeInBits())
    return SDValue();

    ShiftValue = EltSize;

  SDValue Add = Srl->getOperand(0);
  if (Add->getOpcode() != ISD::ADD || !Add->hasOneUse())
    return SDValue();
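
As a standalone illustration of the bounds check settled on above (the helper below is hypothetical, not the combine's actual API), the guard admits only the 1..element-width shifts that rshrnb can encode and bails on the cases the new negative tests cover:

#include <cassert>
#include <optional>

// Hypothetical stand-in for the combine's guard: returns the shift to use
// for the narrowing, or nullopt when the optimisation must bail out.
std::optional<unsigned> rshrnbShift(unsigned ShiftValue, unsigned NarrowEltBits) {
  if (ShiftValue < 1 || ShiftValue > NarrowEltBits)
    return std::nullopt;
  return ShiftValue;
}

int main() {
  assert(!rshrnbShift(0, 16));  // zero shift: bail (@neg_zero_shift below)
  assert(rshrnbShift(16, 16));  // largest legal shift for .h from .s
  assert(rshrnbShift(32, 32));  // largest legal shift for .s from .d
  assert(!rshrnbShift(48, 32)); // exceeds dest eltsize: bail (@neg_wide_add_shift_add_rshrnb_d below)
  return 0;
}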
63 changes: 63 additions & 0 deletions llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
@@ -100,6 +100,23 @@ define void @neg_add_lshr_rshrnb_h_0(ptr %ptr, ptr %dst, i64 %index){
  ret void
}

define void @neg_zero_shift(ptr %ptr, ptr %dst, i64 %index){
; CHECK-LABEL: neg_zero_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    add z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    st1h { z0.s }, p0, [x1, x2, lsl #1]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i32>, ptr %ptr, align 2
  %1 = add <vscale x 4 x i32> %load, trunc (<vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i32>)
  %2 = lshr <vscale x 4 x i32> %1, trunc (<vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 0, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i32>)
  %3 = trunc <vscale x 4 x i32> %2 to <vscale x 4 x i16>
  %4 = getelementptr inbounds i16, ptr %dst, i64 %index
  store <vscale x 4 x i16> %3, ptr %4, align 1
  ret void
}

define void @wide_add_shift_add_rshrnb_b(ptr %dest, i64 %index, <vscale x 16 x i16> %arg1){
; CHECK-LABEL: wide_add_shift_add_rshrnb_b:
; CHECK:       // %bb.0:

@@ -142,6 +159,52 @@ define void @wide_add_shift_add_rshrnb_h(ptr %dest, i64 %index, <vscale x 8 x i3
  ret void
}

define void @wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4 x i64> %arg1){
; CHECK-LABEL: wide_add_shift_add_rshrnb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    rshrnb z1.s, z1.d, #32
; CHECK-NEXT:    rshrnb z0.s, z0.d, #32
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    add z0.s, z1.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %1 = add <vscale x 4 x i64> %arg1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2147483648, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
  %2 = lshr <vscale x 4 x i64> %1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 32, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
  %3 = getelementptr inbounds i32, ptr %dest, i64 %index
  %load = load <vscale x 4 x i32>, ptr %3, align 4
  %4 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i32>
  %5 = add <vscale x 4 x i32> %load, %4
  store <vscale x 4 x i32> %5, ptr %3, align 4
  ret void
}

; Do not emit rshrnb if the shift amount is larger than the dest eltsize in bits
define void @neg_wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4 x i64> %arg1){
; CHECK-LABEL: neg_wide_add_shift_add_rshrnb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.d, #0x800000000000
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    add z0.d, z0.d, z2.d
; CHECK-NEXT:    add z1.d, z1.d, z2.d
; CHECK-NEXT:    lsr z1.d, z1.d, #48
; CHECK-NEXT:    lsr z0.d, z0.d, #48
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    add z0.s, z1.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %1 = add <vscale x 4 x i64> %arg1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 140737488355328, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
  %2 = lshr <vscale x 4 x i64> %1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 48, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
  %3 = getelementptr inbounds i32, ptr %dest, i64 %index
  %load = load <vscale x 4 x i32>, ptr %3, align 4
  %4 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i32>
  %5 = add <vscale x 4 x i32> %load, %4
  store <vscale x 4 x i32> %5, ptr %3, align 4
  ret void
}

define void @neg_trunc_lsr_add_op1_not_splat(ptr %ptr, ptr %dst, i64 %index, <vscale x 8 x i16> %add_op1){
; CHECK-LABEL: neg_trunc_lsr_add_op1_not_splat:
; CHECK:       // %bb.0: