author     Simon Pilgrim <llvm-dev@redking.me.uk>  2021-04-11 18:41:51 +0100
committer  Simon Pilgrim <llvm-dev@redking.me.uk>  2021-04-11 18:42:01 +0100
commit     38c799bce8016f7487067a031b62cf98069aaa4d (patch)
tree       5bd3873e9a7582dc945d7adebe2f7520c3ce0800 /llvm
parent     [RISCV] Drop earlyclobber constraint from vwadd(u).wx, vwsub(u).wx, vfwadd.wf... (diff)
[X86] Fold cmpeq/ne(and(X,Y),Y) --> cmpeq/ne(and(~X,Y),0)
Follow-up to D100177: handle a similar (De Morgan inverse style) case from PR47797 as well.
The AVX512 test cases could be further improved if we folded not(iX bitcast(vXi1)) -> (iX bitcast(not(vXi1)))
Alive2: https://alive2.llvm.org/ce/z/AnA_-W
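
As an editorial aside (not part of the commit): the identity behind the fold is that (X & Y) == Y holds exactly when Y has no bits outside X, i.e. (~X & Y) == 0. The short standalone C++ check below verifies this exhaustively for 8-bit operands; it is illustrative only and does not correspond to any code in the patch.

// Standalone sanity check (illustrative only) for the folded identity:
//   cmpeq(and(X,Y),Y)  <=>  cmpeq(and(~X,Y),0)   (and likewise for cmpne)
// Exhaustive over all 8-bit operand pairs.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    for (unsigned Y = 0; Y < 256; ++Y) {
      std::uint8_t x = static_cast<std::uint8_t>(X);
      std::uint8_t y = static_cast<std::uint8_t>(Y);
      bool EqOld = ((x & y) == y);                           // cmpeq(and(X,Y),Y)
      bool EqNew = (static_cast<std::uint8_t>(~x & y) == 0); // cmpeq(and(~X,Y),0)
      assert(EqOld == EqNew && "eq forms disagree");
      // The ne form is just the negation of the eq form on both sides.
      assert((!EqOld) == (static_cast<std::uint8_t>(~x & y) != 0) && "ne forms disagree");
    }
  }
  std::puts("identity holds for all 8-bit X,Y");
  return 0;
}

This mirrors, at the integer level, what the Alive2 proof above establishes at the IR level.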
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp          22
-rw-r--r--  llvm/test/CodeGen/X86/bmi.ll                      8
-rw-r--r--  llvm/test/CodeGen/X86/movmsk-cmp.ll              52
-rw-r--r--  llvm/test/CodeGen/X86/pr27202.ll                 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-and-bool.ll  57
5 files changed, 84 insertions, 65 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 321d4791029a..3e260027bb83 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -48163,9 +48163,9 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
                          DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
     }
 
-    // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
-    // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
     if (OpVT.isScalarInteger()) {
+      // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
+      // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
       auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
         if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
           if (N0.getOperand(0) == N1)
@@ -48181,6 +48181,24 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
       if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
+
+      // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
+      // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
+      auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
+        if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
+          if (N0.getOperand(0) == N1)
+            return DAG.getNode(ISD::AND, DL, OpVT, N1,
+                               DAG.getNOT(DL, N0.getOperand(1), OpVT));
+          if (N0.getOperand(1) == N1)
+            return DAG.getNode(ISD::AND, DL, OpVT, N1,
+                               DAG.getNOT(DL, N0.getOperand(0), OpVT));
+        }
+        return SDValue();
+      };
+      if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
+        return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
+      if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
+        return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/bmi.ll b/llvm/test/CodeGen/X86/bmi.ll
index 8aea6f99c144..7708c7255661 100644
--- a/llvm/test/CodeGen/X86/bmi.ll
+++ b/llvm/test/CodeGen/X86/bmi.ll
@@ -157,15 +157,15 @@ define i1 @and_cmp_const(i32 %x) {
 ; X86-LABEL: and_cmp_const:
 ; X86: # %bb.0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: andl $43, %eax
-; X86-NEXT: cmpl $43, %eax
+; X86-NEXT: notl %eax
+; X86-NEXT: testb $43, %al
 ; X86-NEXT: sete %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: and_cmp_const:
 ; X64: # %bb.0:
-; X64-NEXT: andl $43, %edi
-; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: notl %edi
+; X64-NEXT: testb $43, %dil
 ; X64-NEXT: sete %al
 ; X64-NEXT: retq
   %and = and i32 %x, 43
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index 748624f36bf7..3d0b7cd50fcf 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -1019,8 +1019,8 @@ define i1 @allzeros_v2i64_not(<2 x i64> %a0) {
 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $3, %al
-; KNL-NEXT: cmpb $3, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $3, %al
 ; KNL-NEXT: setne %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -1840,8 +1840,8 @@ define i1 @allones_v4i32_and1(<4 x i32> %arg) {
 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vptestmd {{.*}}(%rip){1to16}, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $15, %al
-; KNL-NEXT: cmpb $15, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $15, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -2156,8 +2156,8 @@ define i1 @allones_v2i64_and1(<2 x i64> %arg) {
 ; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1]
 ; KNL-NEXT: vptestmq %zmm1, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $3, %al
-; KNL-NEXT: cmpb $3, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $3, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -2254,8 +2254,8 @@ define i1 @allones_v4i64_and1(<4 x i64> %arg) {
 ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-NEXT: vptestmq {{.*}}(%rip){1to8}, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $15, %al
-; KNL-NEXT: cmpb $15, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $15, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -3161,8 +3161,8 @@ define i1 @allones_v4i32_and4(<4 x i32> %arg) {
 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vptestmd {{.*}}(%rip){1to16}, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $15, %al
-; KNL-NEXT: cmpb $15, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $15, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -3477,8 +3477,8 @@ define i1 @allones_v2i64_and4(<2 x i64> %arg) {
 ; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4]
 ; KNL-NEXT: vptestmq %zmm1, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $3, %al
-; KNL-NEXT: cmpb $3, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $3, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -3575,8 +3575,8 @@ define i1 @allones_v4i64_and4(<4 x i64> %arg) {
 ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-NEXT: vptestmq {{.*}}(%rip){1to8}, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $15, %al
-; KNL-NEXT: cmpb $15, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $15, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -3999,8 +3999,8 @@ define i1 @movmsk_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-NEXT: pcmpgtw %xmm1, %xmm0
 ; SSE-NEXT: packsswb %xmm0, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: andb $-109, %al
-; SSE-NEXT: cmpb $-109, %al
+; SSE-NEXT: notb %al
+; SSE-NEXT: testb $-109, %al
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -4009,8 +4009,8 @@ define i1 @movmsk_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; AVX1OR2-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
-; AVX1OR2-NEXT: andb $-109, %al
-; AVX1OR2-NEXT: cmpb $-109, %al
+; AVX1OR2-NEXT: notb %al
+; AVX1OR2-NEXT: testb $-109, %al
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -4020,8 +4020,8 @@ define i1 @movmsk_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $-109, %al
-; KNL-NEXT: cmpb $-109, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $-109, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -4030,8 +4030,8 @@ define i1 @movmsk_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SKX: # %bb.0:
 ; SKX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
 ; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andb $-109, %al
-; SKX-NEXT: cmpb $-109, %al
+; SKX-NEXT: notb %al
+; SKX-NEXT: testb $-109, %al
 ; SKX-NEXT: sete %al
 ; SKX-NEXT: retq
   %cmp = icmp sgt <8 x i16> %x, %y
@@ -4138,8 +4138,8 @@ define i1 @movmsk_and_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vpcmpneqq %zmm1, %zmm0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $3, %al
-; KNL-NEXT: cmpb $3, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $3, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
@@ -4279,8 +4279,8 @@ define i1 @movmsk_and_v2f64(<2 x double> %x, <2 x double> %y) {
 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vcmplepd %zmm0, %zmm1, %k0
 ; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andb $3, %al
-; KNL-NEXT: cmpb $3, %al
+; KNL-NEXT: notb %al
+; KNL-NEXT: testb $3, %al
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: vzeroupper
 ; KNL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr27202.ll b/llvm/test/CodeGen/X86/pr27202.ll
index 2496a4bae258..3bd3be62fb4c 100644
--- a/llvm/test/CodeGen/X86/pr27202.ll
+++ b/llvm/test/CodeGen/X86/pr27202.ll
@@ -4,9 +4,8 @@
 define i1 @foo(i32 %i) optsize {
 ; CHECK-LABEL: foo:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: movl $305419896, %eax # imm = 0x12345678
-; CHECK-NEXT: andl %eax, %edi
-; CHECK-NEXT: cmpl %eax, %edi
+; CHECK-NEXT: notl %edi
+; CHECK-NEXT: testl $305419896, %edi # imm = 0x12345678
 ; CHECK-NEXT: sete %al
 ; CHECK-NEXT: retq
   %and = and i32 %i, 305419896
@@ -17,9 +16,8 @@
 define i1 @foo_pgso(i32 %i) !prof !14 {
 ; CHECK-LABEL: foo_pgso:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: movl $305419896, %eax # imm = 0x12345678
-; CHECK-NEXT: andl %eax, %edi
-; CHECK-NEXT: cmpl %eax, %edi
+; CHECK-NEXT: notl %edi
+; CHECK-NEXT: testl $305419896, %edi # imm = 0x12345678
 ; CHECK-NEXT: sete %al
 ; CHECK-NEXT: retq
   %and = and i32 %i, 305419896
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
index e558660e4de8..4e0410f97346 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
@@ -33,8 +33,8 @@ define i1 @trunc_v2i64_v2i1(<2 x i64>) {
 ; AVX512F-NEXT: vpsllq $63, %xmm0, %xmm0
 ; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $3, %al
-; AVX512F-NEXT: cmpb $3, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $3, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -44,8 +44,8 @@ define i1 @trunc_v2i64_v2i1(<2 x i64>) {
 ; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $3, %al
-; AVX512BW-NEXT: cmpb $3, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $3, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -55,7 +55,8 @@ define i1 @trunc_v2i64_v2i1(<2 x i64>) {
 ; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
 ; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0
 ; AVX512VL-NEXT: kmovd %k0, %eax
-; AVX512VL-NEXT: cmpb $3, %al
+; AVX512VL-NEXT: notb %al
+; AVX512VL-NEXT: testb $3, %al
 ; AVX512VL-NEXT: sete %al
 ; AVX512VL-NEXT: retq
   %a = trunc <2 x i64> %0 to <2 x i1>
@@ -85,8 +86,8 @@ define i1 @trunc_v4i32_v4i1(<4 x i32>) {
 ; AVX512F-NEXT: vpslld $31, %xmm0, %xmm0
 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $15, %al
-; AVX512F-NEXT: cmpb $15, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $15, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -96,8 +97,8 @@ define i1 @trunc_v4i32_v4i1(<4 x i32>) {
 ; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
 ; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $15, %al
-; AVX512BW-NEXT: cmpb $15, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $15, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -107,7 +108,8 @@ define i1 @trunc_v4i32_v4i1(<4 x i32>) {
 ; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
 ; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
 ; AVX512VL-NEXT: kmovd %k0, %eax
-; AVX512VL-NEXT: cmpb $15, %al
+; AVX512VL-NEXT: notb %al
+; AVX512VL-NEXT: testb $15, %al
 ; AVX512VL-NEXT: sete %al
 ; AVX512VL-NEXT: retq
   %a = trunc <4 x i32> %0 to <4 x i1>
@@ -244,8 +246,8 @@ define i1 @trunc_v4i64_v4i1(<4 x i64>) {
 ; AVX512F-NEXT: vpsllq $63, %ymm0, %ymm0
 ; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $15, %al
-; AVX512F-NEXT: cmpb $15, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $15, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -255,8 +257,8 @@ define i1 @trunc_v4i64_v4i1(<4 x i64>) {
 ; AVX512BW-NEXT: vpsllq $63, %ymm0, %ymm0
 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $15, %al
-; AVX512BW-NEXT: cmpb $15, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $15, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -266,7 +268,8 @@ define i1 @trunc_v4i64_v4i1(<4 x i64>) {
 ; AVX512VL-NEXT: vpsllq $63, %ymm0, %ymm0
 ; AVX512VL-NEXT: vptestmq %ymm0, %ymm0, %k0
 ; AVX512VL-NEXT: kmovd %k0, %eax
-; AVX512VL-NEXT: cmpb $15, %al
+; AVX512VL-NEXT: notb %al
+; AVX512VL-NEXT: testb $15, %al
 ; AVX512VL-NEXT: sete %al
 ; AVX512VL-NEXT: vzeroupper
 ; AVX512VL-NEXT: retq
@@ -874,8 +877,8 @@ define i1 @icmp_v2i64_v2i1(<2 x i64>) {
 ; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $3, %al
-; AVX512F-NEXT: cmpb $3, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $3, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -885,8 +888,8 @@ define i1 @icmp_v2i64_v2i1(<2 x i64>) {
 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $3, %al
-; AVX512BW-NEXT: cmpb $3, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $3, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -930,8 +933,8 @@ define i1 @icmp_v4i32_v4i1(<4 x i32>) {
 ; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $15, %al
-; AVX512F-NEXT: cmpb $15, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $15, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -941,8 +944,8 @@ define i1 @icmp_v4i32_v4i1(<4 x i32>) {
 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $15, %al
-; AVX512BW-NEXT: cmpb $15, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $15, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -1109,8 +1112,8 @@ define i1 @icmp_v4i64_v4i1(<4 x i64>) {
 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
 ; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: andb $15, %al
-; AVX512F-NEXT: cmpb $15, %al
+; AVX512F-NEXT: notb %al
+; AVX512F-NEXT: testb $15, %al
 ; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
@@ -1120,8 +1123,8 @@ define i1 @icmp_v4i64_v4i1(<4 x i64>) {
 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andb $15, %al
-; AVX512BW-NEXT: cmpb $15, %al
+; AVX512BW-NEXT: notb %al
+; AVX512BW-NEXT: testb $15, %al
 ; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq