// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=4 -mvscale-max=4 -S -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

// REQUIRES: riscv-registered-target

#include <riscv_vector.h>

typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

fixed_int32m1_t global_vec;

// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr @global_vec, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[TMP0]], align 8
// CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t test_ptr_to_global() {
  fixed_int32m1_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}

//
// Test casting pointer from fixed-length array to scalable vector.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds <8 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 8
// CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t array_arg(fixed_int32m1_t arr[]) {
  return arr[0];
}

// CHECK-LABEL: @test_cast(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT:    store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr @global_vec, align 8
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CASTSCALABLESVE]], <vscale x 2 x i32> [[TMP1]], i64 8)
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    store <8 x i32> [[CASTFIXEDSVE]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP3]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE1]]
//
fixed_int32m1_t test_cast(vint32m1_t vec) {
  return __riscv_vadd(global_vec, vec, __riscv_v_fixed_vlen/32);
}
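
// A minimal usage sketch, not part of the autogenerated test above and
// carrying no CHECK lines: with a fixed vlen pinned as in the RUN line,
// riscv_rvv_vector_bits types convert implicitly to and from their scalable
// counterparts, so a fixed-length vector can be handed straight to a
// scalable intrinsic and the result assigned back. The helper name
// scale_by_two is an assumption for illustration only.
fixed_int32m1_t scale_by_two(fixed_int32m1_t v) {
  vint32m1_t scalable = v;                      // implicit fixed -> scalable
  scalable = __riscv_vadd(scalable, scalable,   // doubles each i32 lane
                          __riscv_v_fixed_vlen / 32);
  return scalable;                              // implicit scalable -> fixed
}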