; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define void @buildvec_vid_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

define void @buildvec_vid_undefelts_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_undefelts_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

; TODO: Could do VID then insertelement on missing elements
define void @buildvec_notquite_vid_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_notquite_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

define void @buildvec_vid_plus_imm_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_plus_imm_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

define void @buildvec_vid_mpy_imm_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_mpy_imm_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    li a1, 3
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

define void @buildvec_vid_step2_add0_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_step2_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  store <4 x i8> , ptr %z1
  store <4 x i8> , ptr %z2
  store <4 x i8> , ptr %z3
  ret void
}

define void @buildvec_vid_step2_add1_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_step2_add1_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  store <4 x i8> , ptr %z1
  store <4 x i8> , ptr %z2
  store <4 x i8> , ptr %z3
  ret void
}
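
; Illustrative sketch (not a checked test): the positive strided sequences
; above are matched as vid * step + addend. For step 2 / addend 1 the
; expansion shown in the CHECK lines works out as:
;   vid.v   v8            ; v8 = <0,1,2,3>
;   vadd.vv v8, v8, v8    ; v8 = <0,2,4,6>  (step 2 via add instead of mul)
;   vadd.vi v8, v8, 1     ; v8 = <1,3,5,7>  (constant addend)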

; FIXME: This could generate vrsub.vi but the (ISD::MUL X, -1) we generate
; while lowering ISD::BUILD_VECTOR is custom-lowered to RISCVISD::MUL_VL before
; being combined.
define void @buildvec_vid_stepn1_add0_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  store <4 x i8> , ptr %z1
  store <4 x i8> , ptr %z2
  store <4 x i8> , ptr %z3
  ret void
}

define void @buildvec_vid_stepn2_add0_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  store <4 x i8> , ptr %z1
  store <4 x i8> , ptr %z2
  store <4 x i8> , ptr %z3
  ret void
}

define void @buildvec_vid_stepn2_add3_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn2_add3_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 3
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  ret void
}

define void @buildvec_vid_stepn3_add3_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn3_add3_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    li a1, -3
; CHECK-NEXT:    vmadd.vx v9, a1, v8
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  ret void
}

define void @buildvec_vid_stepn3_addn3_v4i32(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn3_addn3_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -3
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    li a4, -3
; CHECK-NEXT:    vmadd.vx v9, a4, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    vse32.v v9, (a1)
; CHECK-NEXT:    vse32.v v9, (a2)
; CHECK-NEXT:    vse32.v v9, (a3)
; CHECK-NEXT:    ret
  store <4 x i32> , ptr %z0
  store <4 x i32> , ptr %z1
  store <4 x i32> , ptr %z2
  store <4 x i32> , ptr %z3
  ret void
}
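
; Illustrative sketch (not a checked test): negative steps fold the multiply
; and the addend into a single vmadd.vx, e.g. for step -3 / addend -3 above:
;   vmv.v.i  v8, -3        ; splat of the addend
;   vid.v    v9            ; v9 = <0,1,2,3>
;   li       a4, -3
;   vmadd.vx v9, a4, v8    ; v9 = vid * -3 + -3 = <-3,-6,-9,-12>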

; FIXME: RV32 doesn't catch this pattern due to BUILD_VECTOR legalization.
define <4 x i64> @buildvec_vid_step1_add0_v4i64() {
; RV32-LABEL: buildvec_vid_step1_add0_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 1
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; RV32-NEXT:    vslideup.vi v8, v9, 2
; RV32-NEXT:    lui a0, %hi(.LCPI12_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI12_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_vid_step1_add0_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    vadd.vi v9, v8, 2
; RV64-NEXT:    ret
  ret <4 x i64> 
}

define <4 x i64> @buildvec_vid_step2_add0_v4i64() {
; RV32-LABEL: buildvec_vid_step2_add0_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 2
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; RV32-NEXT:    vslideup.vi v8, v9, 2
; RV32-NEXT:    lui a0, %hi(.LCPI13_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI13_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_vid_step2_add0_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    vadd.vv v8, v8, v8
; RV64-NEXT:    vadd.vi v9, v8, 4
; RV64-NEXT:    ret
  ret <4 x i64> 
}

define void @buildvec_no_vid_v4i8(ptr %z0, ptr %z1, ptr %z2, ptr %z3, ptr %z4, ptr %z5) {
; CHECK-LABEL: buildvec_no_vid_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a6, %hi(.LCPI14_0)
; CHECK-NEXT:    addi a6, a6, %lo(.LCPI14_0)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a6)
; CHECK-NEXT:    lui a6, %hi(.LCPI14_1)
; CHECK-NEXT:    addi a6, a6, %lo(.LCPI14_1)
; CHECK-NEXT:    vle8.v v9, (a6)
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    vse8.v v9, (a1)
; CHECK-NEXT:    li a0, 1
; CHECK-NEXT:    slli a0, a0, 11
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    li a0, 2047
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    lui a0, %hi(.LCPI14_2)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_2)
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    vmv.v.i v8, -2
; CHECK-NEXT:    vse8.v v8, (a4)
; CHECK-NEXT:    vse8.v v9, (a5)
; CHECK-NEXT:    ret
  store <4 x i8> , ptr %z0
  store <4 x i8> , ptr %z1
  store <4 x i8> , ptr %z2
  store <4 x i8> , ptr %z3
  store <4 x i8> , ptr %z4
  store <4 x i8> , ptr %z5
  ret void
}

define void @buildvec_dominant0_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_dominant0_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmv.v.i v9, 8
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 3
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> , ptr %x
  ret void
}

define void @buildvec_dominant1_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_dominant1_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> , ptr %x
  ret void
}

define void @buildvec_dominant0_v2i8(ptr %x) {
; CHECK-LABEL: buildvec_dominant0_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  store <2 x i8> , ptr %x
  ret void
}
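
; Illustrative sketch (not a checked test): the dominant0 cases above splat
; the most frequent element and slide the odd one out into place, e.g. for an
; <8 x i16> of 8s with a single 0 at index 3:
;   vmv.s.x     v8, zero    ; scalar 0
;   vmv.v.i     v9, 8       ; splat the dominant value
;   vslideup.vi v9, v8, 3   ; overwrite element 3 with 0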

define void @buildvec_dominant1_v2i8(ptr %x) {
; CHECK-LABEL: buildvec_dominant1_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -1
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x i8> , ptr %x
  ret void
}

define void @buildvec_dominant2_v2i8(ptr %x) {
; CHECK-LABEL: buildvec_dominant2_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x i8> , ptr %x
  ret void
}

define void @buildvec_dominant0_v2i32(ptr %x) {
; RV32-LABEL: buildvec_dominant0_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI20_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI20_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v8, (a1)
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_dominant0_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI20_0)
; RV64-NEXT:    ld a1, %lo(.LCPI20_0)(a1)
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, -1
; RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a1
; RV64-NEXT:    vse64.v v8, (a0)
; RV64-NEXT:    ret
  store <2 x i64> , ptr %x
  ret void
}

define void @buildvec_dominant1_optsize_v2i32(ptr %x) optsize {
; RV32-LABEL: buildvec_dominant1_optsize_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI21_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v8, (a1)
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_dominant1_optsize_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI21_0)
; RV64-NEXT:    addi a1, a1, %lo(.LCPI21_0)
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vle64.v v8, (a1)
; RV64-NEXT:    vse64.v v8, (a0)
; RV64-NEXT:    ret
  store <2 x i64> , ptr %x
  ret void
}

define void @buildvec_seq_v8i8_v4i16(ptr %x) {
; CHECK-LABEL: buildvec_seq_v8i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 513
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i8> , ptr %x
  ret void
}

define void @buildvec_seq_v8i8_v2i32(ptr %x) {
; RV32-LABEL: buildvec_seq_v8i8_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 48
; RV32-NEXT:    addi a1, a1, 513
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_seq_v8i8_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 48
; RV64-NEXT:    addiw a1, a1, 513
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vmv.v.x v8, a1
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT:    vse8.v v8, (a0)
; RV64-NEXT:    ret
  store <8 x i8> , ptr %x
  ret void
}

define void @buildvec_seq_v16i8_v2i64(ptr %x) {
; RV32-LABEL: buildvec_seq_v16i8_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI24_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI24_0)
; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT:    vle8.v v8, (a1)
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_seq_v16i8_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI24_0)
; RV64-NEXT:    addi a1, a1, %lo(.LCPI24_0)
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vlse64.v v8, (a1), zero
; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT:    vse8.v v8, (a0)
; RV64-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}
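
; Illustrative note (not a checked test): the buildvec_seq tests above build a
; byte pattern that repeats every few elements as a splat of a single wider
; element, e.g. buildvec_seq_v8i8_v4i16 splats 513 = 0x0201 at SEW=16:
;   li      a1, 513    ; 0x0201, bytes <1, 2> in little-endian order
;   vmv.v.x v8, a1     ; e16 splat -> byte sequence 1,2,1,2,...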

define void @buildvec_seq2_v16i8_v2i64(ptr %x) {
; RV32-LABEL: buildvec_seq2_v16i8_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 528432
; RV32-NEXT:    addi a1, a1, 513
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_seq2_v16i8_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 528432
; RV64-NEXT:    addiw a1, a1, 513
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a1
; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT:    vse8.v v8, (a0)
; RV64-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}

define void @buildvec_seq_v9i8(ptr %x) {
; RV32-LABEL: buildvec_seq_v9i8:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 3
; RV32-NEXT:    sb a1, 8(a0)
; RV32-NEXT:    li a1, 73
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vmv.s.x v0, a1
; RV32-NEXT:    vmv.v.i v9, 2
; RV32-NEXT:    li a1, 36
; RV32-NEXT:    vmv.s.x v8, a1
; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
; RV32-NEXT:    vmv1r.v v0, v8
; RV32-NEXT:    vmerge.vim v8, v9, 3, v0
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_seq_v9i8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI26_0)
; RV64-NEXT:    ld a1, %lo(.LCPI26_0)(a1)
; RV64-NEXT:    li a2, 3
; RV64-NEXT:    sb a2, 8(a0)
; RV64-NEXT:    sd a1, 0(a0)
; RV64-NEXT:    ret
  store <9 x i8> , ptr %x
  ret void
}

define void @buildvec_seq_v4i16_v2i32(ptr %x) {
; CHECK-LABEL: buildvec_seq_v4i16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, -127
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x i16> , ptr %x
  ret void
}

define void @buildvec_vid_step1o2_v4i32(ptr %z0, ptr %z1, ptr %z2, ptr %z3, ptr %z4, ptr %z5, ptr %z6) {
; CHECK-LABEL: buildvec_vid_step1o2_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsrl.vi v8, v8, 1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    vse32.v v8, (a3)
; CHECK-NEXT:    vse32.v v8, (a4)
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmv.v.i v9, 1
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vslideup.vi v10, v8, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v10, (a5)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    vse32.v v8, (a6)
; CHECK-NEXT:    ret
  store <4 x i32> , ptr %z0
  store <4 x i32> , ptr %z1
  store <4 x i32> , ptr %z2
  store <4 x i32> , ptr %z3
  store <4 x i32> , ptr %z4
  ; We don't catch this one
  store <4 x i32> , ptr %z5
  ; We catch this one but as VID/3 rather than VID/2
  store <4 x i32> , ptr %z6
  ret void
}
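
; Illustrative sketch (not a checked test): the fractional step of 1/2 in the
; function above is produced by shifting vid right by one:
;   vid.v   v8          ; v8 = <0,1,2,3>
;   vsrl.vi v8, v8, 1   ; v8 = <0,0,1,1>  (vid / 2)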

define void @buildvec_vid_step1o2_add3_v4i16(ptr %z0, ptr %z1, ptr %z2, ptr %z3, ptr %z4, ptr %z5, ptr %z6) {
; CHECK-LABEL: buildvec_vid_step1o2_add3_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsrl.vi v8, v8, 1
; CHECK-NEXT:    vadd.vi v8, v8, 3
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    vse16.v v8, (a3)
; CHECK-NEXT:    vse16.v v8, (a4)
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    vmv.v.i v9, 4
; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vslideup.vi v10, v8, 1
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v10, (a5)
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    vse16.v v8, (a6)
; CHECK-NEXT:    ret
  store <4 x i16> , ptr %z0
  store <4 x i16> , ptr %z1
  store <4 x i16> , ptr %z2
  store <4 x i16> , ptr %z3
  store <4 x i16> , ptr %z4
  ; We don't catch this one
  store <4 x i16> , ptr %z5
  ; We catch this one but as VID/3 rather than VID/2
  store <4 x i16> , ptr %z6
  ret void
}

define void @buildvec_vid_stepn1o4_addn5_v8i8(ptr %z0) {
; CHECK-LABEL: buildvec_vid_stepn1o4_addn5_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vrsub.vi v8, v8, -5
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i8> , ptr %z0
  ret void
}

define void @buildvec_vid_mpy_imm_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_vid_mpy_imm_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    li a1, 17
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> , ptr %x
  ret void
}

define void @buildvec_vid_shl_imm_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_vid_shl_imm_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> , ptr %x
  ret void
}

define <4 x i32> @splat_c3_v4i32(<4 x i32> %v) {
; CHECK-LABEL: splat_c3_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 3
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x i32> %v, i32 3
  %ins = insertelement <4 x i32> poison, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}

define <4 x i32> @splat_idx_v4i32(<4 x i32> %v, i64 %idx) {
; CHECK-LABEL: splat_idx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x i32> %v, i64 %idx
  %ins = insertelement <4 x i32> poison, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}

define <8 x i16> @splat_c4_v8i16(<8 x i16> %v) {
; CHECK-LABEL: splat_c4_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 4
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <8 x i16> %v, i32 4
  %ins = insertelement <8 x i16> poison, i16 %x, i32 0
  %splat = shufflevector <8 x i16> %ins, <8 x i16> poison, <8 x i32> zeroinitializer
  ret <8 x i16> %splat
}

define <8 x i16> @splat_idx_v8i16(<8 x i16> %v, i64 %idx) {
; CHECK-LABEL: splat_idx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <8 x i16> %v, i64 %idx
  %ins = insertelement <8 x i16> poison, i16 %x, i32 0
  %splat = shufflevector <8 x i16> %ins, <8 x i16> poison, <8 x i32> zeroinitializer
  ret <8 x i16> %splat
}

define <4 x i8> @buildvec_not_vid_v4i8_1() {
; CHECK-LABEL: buildvec_not_vid_v4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI37_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI37_0)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  ret <4 x i8> 
}

define <4 x i8> @buildvec_not_vid_v4i8_2() {
; CHECK-LABEL: buildvec_not_vid_v4i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI38_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI38_0)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  ret <4 x i8> 
}
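
; Illustrative note (not a checked test): when no vid-based or splat form is
; found, as in the two functions above, the vector is simply loaded from the
; constant pool (.LCPI_example below is a placeholder, not a symbol from this
; file):
;   lui    a0, %hi(.LCPI_example)
;   addi   a0, a0, %lo(.LCPI_example)
;   vle8.v v8, (a0)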

; We match this as a VID sequence (-3 / 8) + 5 but choose not to introduce
; division to compute it.
define <16 x i8> @buildvec_not_vid_v16i8() {
; CHECK-LABEL: buildvec_not_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 3
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vsetivli zero, 7, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 6
; CHECK-NEXT:    ret
  ret <16 x i8> 
}
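
; Illustrative worked form of the match described above: a fractional VID
; sequence with step num/denom and addend a has elements
;   v[i] = a + (i * num) / denom
; so (-3 / 8) + 5 would be v[i] = 5 + (i * -3) / 8. Materializing that needs a
; division, so the constant is built with a splat plus vslideup instead.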