// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -mvscale-min=4 -mvscale-max=4 -S -O1 -emit-llvm -o - %s | FileCheck %s
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

//===----------------------------------------------------------------------===//
// Test caller/callee with VLST <-> VLAT
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @sizeless_callee(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    ret <vscale x 4 x i32> [[X:%.*]]
//
svint32_t sizeless_callee(svint32_t x) {
  return x;
}

// CHECK-LABEL: @fixed_caller(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
//
fixed_int32_t fixed_caller(fixed_int32_t x) {
  return sizeless_callee(x);
}

// CHECK-LABEL: @fixed_callee(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
//
fixed_int32_t fixed_callee(fixed_int32_t x) {
  return x;
}

// CHECK-LABEL: @sizeless_caller(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], ptr [[COERCE1]], align 16
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
//
svint32_t sizeless_caller(svint32_t x) {
  return fixed_callee(x);
}

//===----------------------------------------------------------------------===//
// fixed, fixed
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2_COERCE:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2_COERCE:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
//
fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP2:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 16 x i1> [[TMP1:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
  return svsel(pg, op1, op2);
}

//===----------------------------------------------------------------------===//
// fixed, scalable
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
//
fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP0:%.*]], <vscale x 16 x i1> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
//
fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
  return svsel(pg, op1, op2);
}

//===----------------------------------------------------------------------===//
// scalable, scalable
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
//
fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
//
fixed_bool_t call_bool_ss(svbool_t pg, svbool_t op1, svbool_t op2) {
  return svsel(pg, op1, op2);
}