; Source commit: "Custom lower ISD::VECREDUCE_AND/OR/XOR using vector logic
; ops. Handling of any_of/all_of/parity patterns will happen later once we
; start dismantling combinePredicateReduction()."
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=x86_64-unknown-linux | FileCheck %s

; This test verifies that the experimental.vector.extract.last.active intrinsic
; doesn't cause an infinite loop during legalization when the step vector type
; needs widening (e.g., v4i8 -> v16i8 on X86).

; extract.last.active on v4i32 with an all-ones (-1) default value.
define i32 @extract_last_active_v4i32(<4 x i32> %a, <4 x i1> %c) {
; CHECK-LABEL: extract_last_active_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; CHECK-NEXT: por %xmm1, %xmm2
; CHECK-NEXT: pslld $31, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
; CHECK-NEXT: por %xmm2, %xmm0
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpb $1, %cl
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; CHECK-NEXT: movd %xmm0, %edx
; CHECK-NEXT: cmpl %ecx, %edx
; CHECK-NEXT: cmoval %edx, %ecx
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
; CHECK-NEXT: movd %xmm0, %edx
; CHECK-NEXT: cmpl %edx, %ecx
; CHECK-NEXT: cmovbel %edx, %ecx
; CHECK-NEXT: orl -24(%rsp,%rcx,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32> %a, <4 x i1> %c, i32 -1)
  ret i32 %res
}

; extract.last.active on v4i32 with a poison default value (no default blend
; required in the lowering).
define i32 @extract_last_active_v4i32_no_default(<4 x i32> %a, <4 x i1> %c) {
; CHECK-LABEL: extract_last_active_v4i32_no_default:
; CHECK: # %bb.0:
; CHECK-NEXT: pslld $31, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: cmpl %eax, %ecx
; CHECK-NEXT: cmoval %ecx, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: cmpl %ecx, %eax
; CHECK-NEXT: cmovbel %ecx, %eax
; CHECK-NEXT: movl -24(%rsp,%rax,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32> %a, <4 x i1> %c, i32 poison)
  ret i32 %res
}

; Test v2i32 - smaller vector.
define i32 @extract_last_active_v2i32(<2 x i32> %a, <2 x i1> %c) {
; CHECK-LABEL: extract_last_active_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; CHECK-NEXT: por %xmm1, %xmm2
; CHECK-NEXT: psllq $63, %xmm1
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %xmm2, %rcx
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpb $1, %cl
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
; CHECK-NEXT: psrld $31, %xmm0
; CHECK-NEXT: movq %xmm0, %rcx
; CHECK-NEXT: movl %ecx, %ecx
; CHECK-NEXT: orl -24(%rsp,%rcx,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v2i32(<2 x i32> %a, <2 x i1> %c, i32 -1)
  ret i32 %res
}

; Test v3i32 - non-power-of-2 element count that requires mask widening
; (v3i1 -> v4i1) via WidenVecOp_VECTOR_FIND_LAST_ACTIVE.
define i32 @extract_last_active_v3i32(<3 x i32> %a, <3 x i1> %c) {
; CHECK-LABEL: extract_last_active_v3i32:
; CHECK: # %bb.0:
; CHECK-NEXT: movd %esi, %xmm1
; CHECK-NEXT: movd %edi, %xmm2
; CHECK-NEXT: movdqa %xmm2, %xmm3
; CHECK-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; CHECK-NEXT: movd %edx, %xmm4
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: pslld $31, %xmm3
; CHECK-NEXT: psrad $31, %xmm3
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: cmpl %ecx, %eax
; CHECK-NEXT: cmoval %eax, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $0, %ecx
; CHECK-NEXT: cmovbel %eax, %ecx
; CHECK-NEXT: por %xmm4, %xmm2
; CHECK-NEXT: por %xmm1, %xmm2
; CHECK-NEXT: movd %xmm2, %edx
; CHECK-NEXT: andb $1, %dl
; CHECK-NEXT: cmpb $1, %dl
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: orl -24(%rsp,%rcx,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v3i32(<3 x i32> %a, <3 x i1> %c, i32 -1)
  ret i32 %res
}

; Test v8i32 - larger vector where step vector type doesn't need widening.
define i32 @extract_last_active_v8i32(<8 x i32> %a, <8 x i1> %c) {
; CHECK-LABEL: extract_last_active_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
; CHECK-NEXT: por %xmm2, %xmm3
; CHECK-NEXT: psllw $15, %xmm2
; CHECK-NEXT: psraw $15, %xmm2
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
; CHECK-NEXT: por %xmm3, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrld $16, %xmm1
; CHECK-NEXT: por %xmm0, %xmm1
; CHECK-NEXT: movd %xmm1, %ecx
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpb $1, %cl
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT: psubusw %xmm2, %xmm0
; CHECK-NEXT: paddw %xmm2, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: psubusw %xmm0, %xmm1
; CHECK-NEXT: paddw %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: psrld $16, %xmm0
; CHECK-NEXT: psubusw %xmm1, %xmm0
; CHECK-NEXT: paddw %xmm1, %xmm0
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: orl -40(%rsp,%rcx,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v8i32(<8 x i32> %a, <8 x i1> %c, i32 -1)
  ret i32 %res
}

; Test v16i32 - even larger vector.
define i32 @extract_last_active_v16i32(<16 x i32> %a, <16 x i1> %c) {
; CHECK-LABEL: extract_last_active_v16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
; CHECK-NEXT: por %xmm4, %xmm5
; CHECK-NEXT: psllw $7, %xmm4
; CHECK-NEXT: pxor %xmm6, %xmm6
; CHECK-NEXT: pcmpgtb %xmm4, %xmm6
; CHECK-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
; CHECK-NEXT: pmaxub %xmm6, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: psrld $16, %xmm0
; CHECK-NEXT: pmaxub %xmm1, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlw $8, %xmm1
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: movd %xmm1, %ecx
; CHECK-NEXT: andl $15, %ecx
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
; CHECK-NEXT: por %xmm5, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrld $16, %xmm1
; CHECK-NEXT: por %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: psrlw $8, %xmm0
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: movd %xmm0, %edx
; CHECK-NEXT: andb $1, %dl
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpb $1, %dl
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: orl -72(%rsp,%rcx,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v16i32(<16 x i32> %a, <16 x i1> %c, i32 -1)
  ret i32 %res
}

; Test for older x86 CPUs (pre-AVX) where the mask type legalization creates
; larger element types, requiring the step vector type adjustment.
define i32 @extract_last_active_v4i32_penryn(<4 x i32> %a, <4 x i1> %c) "target-cpu"="penryn" {
; CHECK-LABEL: extract_last_active_v4i32_penryn:
; CHECK: # %bb.0:
; CHECK-NEXT: pslld $31, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: pextrd $2, %xmm1, %eax
; CHECK-NEXT: pextrd $1, %xmm1, %ecx
; CHECK-NEXT: cmpl %eax, %ecx
; CHECK-NEXT: cmoval %ecx, %eax
; CHECK-NEXT: pextrd $3, %xmm1, %ecx
; CHECK-NEXT: cmpl %ecx, %eax
; CHECK-NEXT: cmovbel %ecx, %eax
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movl -24(%rsp,%rax,4), %eax
; CHECK-NEXT: retq
  %res = call i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32> %a, <4 x i1> %c, i32 poison)
  ret i32 %res
}

; v32i8 data requires splitting into two XMM halves on SSE2.
; NOTE(review): the intrinsic name suffix is .v16i8 while the data operand is
; <32 x i8>; the IR parser remangles overloaded intrinsic names on load, so
; this should be accepted as-is — TODO confirm.
define i8 @extract_last_active_split(<32 x i8> %data, <32 x i8> %mask, i8 %passthru) {
; CHECK-LABEL: extract_last_active_split:
; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm2, %xmm4
; CHECK-NEXT: por %xmm3, %xmm4
; CHECK-NEXT: pxor %xmm5, %xmm5
; CHECK-NEXT: pcmpeqb %xmm5, %xmm3
; CHECK-NEXT: pcmpeqb %xmm5, %xmm2
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: pandn %xmm0, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; CHECK-NEXT: pmaxub %xmm2, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; CHECK-NEXT: pmaxub %xmm1, %xmm2
; CHECK-NEXT: movdqa %xmm2, %xmm1
; CHECK-NEXT: psrld $16, %xmm1
; CHECK-NEXT: pmaxub %xmm2, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm2
; CHECK-NEXT: psrlw $8, %xmm2
; CHECK-NEXT: pmaxub %xmm1, %xmm2
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: pmovmskb %xmm3, %ecx
; CHECK-NEXT: pandn %xmm0, %xmm3
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
; CHECK-NEXT: pmaxub %xmm3, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: psrld $16, %xmm0
; CHECK-NEXT: pmaxub %xmm1, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlw $8, %xmm1
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: movd %xmm1, %edx
; CHECK-NEXT: addl $16, %edx
; CHECK-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
; CHECK-NEXT: cmoveq %rax, %rdx
; CHECK-NEXT: andl $31, %edx
; CHECK-NEXT: movzbl -40(%rsp,%rdx), %eax
; CHECK-NEXT: pcmpeqb %xmm5, %xmm4
; CHECK-NEXT: pmovmskb %xmm4, %ecx
; CHECK-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
  %notzero = icmp ne <32 x i8> %mask, zeroinitializer
  %res = call i8 @llvm.experimental.vector.extract.last.active.v16i8(<32 x i8> %data, <32 x i1> %notzero, i8 %passthru)
  ret i8 %res
}