diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index d3ade31740c7d..64f9e3eb8d86f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -4812,6 +4812,8 @@ bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
     return true;
   if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
     return true;
+  if (LHS.isImm() && LHS.getImm() == 0)
+    return true;
   if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
     return false;
   if (!LHS.isImm() || !RHS.isImm())
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 7afd31fdd663c..a04e31a19a4f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -439,9 +439,10 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vsadd_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 128
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v24, (a0)
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index f61b112fd8024..5556b11e9a90c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -435,9 +435,10 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vsaddu_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 128
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v24, (a0)
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 6ddf2e464750e..c28317bf14269 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -454,14 +454,15 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vssub_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 128
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v24, (a0)
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    li a1, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT:    vssub.vx v16, v16, a1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index c403593894794..cbfe1292877ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -449,14 +449,15 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vssubu_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 128
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v24, (a0)
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    li a1, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT:    vssubu.vx v16, v16, a1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
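
Note on the change: the new early-out encodes that an immediate VL of 0 is known to be
less than or equal to any other VL, since a zero-VL operation writes no elements. In the
evl128 tests above, the second half of the split <256 x i8> op runs with an EVL of 0, so
the vlm.v that feeds only that half can now itself run under "vsetivli zero, 0". Below is
a sketch of the full comparison after this patch, reconstructed from the hunk plus its
context lines; the leading register-equality clause is assumed from surrounding trunk
code and may not match it verbatim:

  // Sketch: do we know the VL in LHS is <= the VL in RHS?
  bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
    // Same virtual register on both sides: trivially equal (assumed clause,
    // implied by the "return true;" context line at the top of the hunk).
    if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
        LHS.getReg() == RHS.getReg())
      return true;
    // RHS is VLMAX: every VL is <= VLMAX.
    if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
      return true;
    // New in this patch: VL 0 is <= every other VL.
    if (LHS.isImm() && LHS.getImm() == 0)
      return true;
    // LHS is VLMAX but RHS is not: cannot be <=.
    if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
      return false;
    // Anything non-immediate left over is unknown; be conservative.
    if (!LHS.isImm() || !RHS.isImm())
      return false;
    // Two plain immediates: compare directly.
    return LHS.getImm() <= RHS.getImm();
  }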