diff --git a/cranelift/codegen/src/isa/riscv64/inst.isle b/cranelift/codegen/src/isa/riscv64/inst.isle index 6402390d9ece..fb6ee83b5899 100644 --- a/cranelift/codegen/src/isa/riscv64/inst.isle +++ b/cranelift/codegen/src/isa/riscv64/inst.isle @@ -2587,6 +2587,16 @@ (decl gen_select_xreg (IntegerCompare XReg XReg) XReg) +;; Rotate Zero Reg to the right. This allows us to write fewer rules +;; below when matching the zero register + +(rule 4 (gen_select_xreg (int_compare_decompose cc a @ (zero_reg) b) x y) + (gen_select_xreg (int_compare (intcc_swap_args cc) b a) x y)) + +(rule 3 (gen_select_xreg c @ (int_compare_decompose cc a b) x @ (zero_reg) y) + (gen_select_xreg (int_compare (intcc_complement cc) a b) y x)) + + (rule 2 (gen_select_xreg (int_compare_decompose cc x y) x y) (if-let (IntCC.UnsignedLessThan) (intcc_without_eq cc)) (if-let $true (has_zbb)) diff --git a/cranelift/filetests/filetests/isa/riscv64/zicond.clif b/cranelift/filetests/filetests/isa/riscv64/zicond.clif index e7fe08da7e64..4c77e38ccab1 100644 --- a/cranelift/filetests/filetests/isa/riscv64/zicond.clif +++ b/cranelift/filetests/filetests/isa/riscv64/zicond.clif @@ -41,22 +41,19 @@ function %select_zero_icmp_neq_reverse(i64, i64) -> i64 { block0(v0: i64, v1: i64): v2 = iconst.i64 0 - v3 = icmp.i64 ne v0, v2 + v3 = icmp.i64 ne v2, v0 v4 = select.i64 v3, v2, v1 return v4 } ; VCode: ; block0: -; select a0,zero,a1##condition=(a0 ne zero) +; czero.nez a0,a1,a0 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; beqz a0, 0xc -; mv a0, zero -; j 8 -; mv a0, a1 +; .byte 0x33, 0xf5, 0xa5, 0x0e ; ret function %select_zero_icmp_eqz(i64, i64) -> i64 { @@ -81,21 +78,18 @@ block0(v0: i64, v1: i64): function %select_zero_icmp_eqz_reverse(i64, i64) -> i64 { block0(v0: i64, v1: i64): v2 = iconst.i64 0 - v3 = icmp.i64 eq v0, v2 + v3 = icmp.i64 eq v2, v0 v4 = select.i64 v3, v2, v1 return v4 } ; VCode: ; block0: -; select a0,zero,a1##condition=(a0 eq zero) +; czero.eqz a0,a1,a0 ; ret ; ; 
Disassembled: ; block0: ; offset 0x0 -; bnez a0, 0xc -; mv a0, zero -; j 8 -; mv a0, a1 +; .byte 0x33, 0xd5, 0xa5, 0x0e ; ret