author     Alexey Bataev <a.bataev@hotmail.com>  2019-10-17 17:12:03 +0000
committer  Alexey Bataev <a.bataev@hotmail.com>  2019-10-17 17:12:03 +0000
commit     68798ad421f79deef8f9be75dd3c6ae3a25f412f (patch)
tree       86c128b67faa84686b2b74ecf724f7a6c70f6201
parent     3b8d539899d6651504edfbaaa2ea68eb9d7aa6ac (diff)
[OPENMP] Improve use of the global tid parameter.

If we can determine that the global tid parameter can be used in the function, it is better to use it rather than calling the __kmpc_global_thread_num function.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@375134 91177308-0d34-0410-b5e6-96231b3b80d8
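For context, a minimal OpenMP source of the kind this change affects (a hypothetical example, not taken from the commit): Clang outlines the body of the parallel region into a separate function that already receives the global thread id as a parameter, so runtime calls such as __kmpc_critical can reuse that parameter instead of emitting an extra __kmpc_global_thread_num call.

// Hypothetical example (not from the commit): the outlined body of the
// parallel region receives the global tid as a parameter; with this change
// the __kmpc_critical call emitted for the critical construct reuses that
// parameter instead of calling __kmpc_global_thread_num again.
// Build with something like: clang++ -fopenmp example.cpp
int main() {
  int x = 0;
#pragma omp parallel shared(x)
  {
#pragma omp critical
    ++x;
  }
  return x;
}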
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp       23
-rw-r--r--  test/OpenMP/openmp_win_codegen.cpp     3
-rw-r--r--  test/OpenMP/parallel_for_codegen.cpp  13
3 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 6a6659f906..8b853e8aa1 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1697,18 +1697,23 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
return ThreadID;
}
// If exceptions are enabled, do not use parameter to avoid possible crash.
- if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
- !CGF.getLangOpts().CXXExceptions ||
- CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
- if (auto *OMPRegionInfo =
- dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- if (OMPRegionInfo->getThreadIDVariable()) {
- // Check if this an outlined function with thread id passed as argument.
- LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ if (auto *OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ if (OMPRegionInfo->getThreadIDVariable()) {
+ // Check if this an outlined function with thread id passed as argument.
+ LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
+ if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
+ !CGF.getLangOpts().CXXExceptions ||
+ CGF.Builder.GetInsertBlock() == TopBlock ||
+ !isa<llvm::Instruction>(LVal.getPointer()) ||
+ cast<llvm::Instruction>(LVal.getPointer())->getParent() == TopBlock ||
+ cast<llvm::Instruction>(LVal.getPointer())->getParent() ==
+ CGF.Builder.GetInsertBlock()) {
ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
// If value loaded in entry block, cache it and use it everywhere in
// function.
- if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
+ if (CGF.Builder.GetInsertBlock() == TopBlock) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = ThreadID;
}
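In rough terms, the new logic in getThreadID amounts to the following decision, shown here as a minimal standalone sketch (the names RegionInfo, HasThreadIDParam, and TidParamValue are made up for illustration and are not Clang's actual CGOpenMPRuntime API): reuse the thread-id parameter of the outlined region unless exceptions could introduce a landing pad and the parameter's load does not sit in a block known to be safe (the entry block or the current insertion block).

// Minimal sketch, not Clang code: reuse the outlined region's tid parameter
// when it is safe to do so; otherwise fall back to __kmpc_global_thread_num.
#include <optional>

struct RegionInfo {
  bool HasThreadIDParam = false;   // outlined function received a gtid argument
  bool LoadIsInSafeBlock = false;  // load sits in the entry/current block
};

std::optional<int> getThreadID(const RegionInfo &Info, bool NeedsLandingPad,
                               int TidParamValue) {
  if (!Info.HasThreadIDParam)
    return std::nullopt;           // no parameter: must call the runtime
  if (NeedsLandingPad && !Info.LoadIsInSafeBlock)
    return std::nullopt;           // unsafe across EH edges: call the runtime
  return TidParamValue;            // reuse the parameter's value
}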
diff --git a/test/OpenMP/openmp_win_codegen.cpp b/test/OpenMP/openmp_win_codegen.cpp
index 289f058966..b044176793 100644
--- a/test/OpenMP/openmp_win_codegen.cpp
+++ b/test/OpenMP/openmp_win_codegen.cpp
@@ -50,11 +50,10 @@ int main() {
}
// CHECK: define internal void [[OUTLINED]](
-// CHECK: [[GID:%.+]] = {{.*}}call i32 @__kmpc_global_thread_num(%struct.ident_t* {{.*}}@0)
// CHECK: invoke void @{{.+}}foo
// CHECK: [[CATCHSWITCH:%.+]] = catchswitch within none
// CHECK: [[CATCHPAD:%.+]] = catchpad within [[CATCHSWITCH]]
-// CHECK: call void @__kmpc_critical(%struct.ident_t* {{.*}}@0, i32 [[GID]],
+// CHECK: call void @__kmpc_critical(%struct.ident_t* {{.*}}@0, i32 [[GID:%.+]],
// CHECK: invoke void @{{.+}}bar
// CHECK: call void @__kmpc_end_critical(%struct.ident_t* {{.*}}@0, i32 [[GID]],
// CHECK: catchret from [[CATCHPAD]] to
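The CHECK updates above reflect a construct along these lines (a sketch under the assumption that the test places a critical region inside a try/catch; it is not the verbatim contents of test/OpenMP/openmp_win_codegen.cpp, whose source is not shown in this diff): the critical construct is reached from a catchpad, and the patch now reuses the tid parameter there, so the old CHECK line expecting a separate __kmpc_global_thread_num call is removed.

// Illustrative sketch only, not the actual test source: a critical region
// inside a catch handler of a parallel region. The __kmpc_critical call
// emitted for it now takes the outlined function's tid parameter directly.
void foo() {}
void bar() {}

int main() {
#pragma omp parallel
  {
    try {
      foo();
    } catch (int) {
#pragma omp critical
      bar();
    }
  }
  return 0;
}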
diff --git a/test/OpenMP/parallel_for_codegen.cpp b/test/OpenMP/parallel_for_codegen.cpp
index ffa715548a..262de00955 100644
--- a/test/OpenMP/parallel_for_codegen.cpp
+++ b/test/OpenMP/parallel_for_codegen.cpp
@@ -40,7 +40,7 @@ void with_var_schedule() {
// CHECK: [[CHUNK_VAL:%.+]] = load i8, i8* %
// CHECK: [[CHUNK_SIZE:%.+]] = sext i8 [[CHUNK_VAL]] to i64
// CHECK: call void @__kmpc_for_static_init_8u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%[^,]+]], i32 33, i32* [[IS_LAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]], i64 1, i64 [[CHUNK_SIZE]])
-// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
+// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]])
#pragma omp parallel for schedule(static, char(a)) private(a)
for (unsigned long long i = 1; i < 2 + a; ++i) {
}
@@ -284,7 +284,7 @@ void test_auto(float *a, float *b, float *c, float *d) {
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 38, i64 0, i64 [[LAST_ITER:%[^,]+]], i64 1, i64 1)
//
-// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
+// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]
@@ -494,7 +494,6 @@ void range_for_single() {
// OMP5: [[IS_LAST:%.+]] = alloca i32,
// OMP5: [[BEGIN:%.+]] = alloca i32*,
// OMP5: [[A_PTR:%.+]] = alloca i32*,
-// OMP5: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(
// __range = arr;
// OMP5: [[ARR:%.+]] = load [10 x i32]*, [10 x i32]** [[ARR_ADDR]],
@@ -551,7 +550,7 @@ void range_for_single() {
// OMP5: store i32 0, i32* [[IS_LAST]],
// loop.
-// OMP5: call void @__kmpc_for_static_init_8(%struct.ident_t* {{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST]], i64* [[LB]], i64* [[UB]], i64* [[STRIDE]], i64 1, i64 1)
+// OMP5: call void @__kmpc_for_static_init_8(%struct.ident_t* {{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST]], i64* [[LB]], i64* [[UB]], i64* [[STRIDE]], i64 1, i64 1)
// ub = (ub > number_of_elems ? number_of_elems : ub);
// OMP5: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
@@ -613,7 +612,7 @@ void range_for_single() {
// end:
// OMP5: [[END]]:
-// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 [[GTID]])
+// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 [[GTID:%.+]])
// exit:
// OMP5: [[EXIT]]:
// OMP5: ret void
@@ -621,12 +620,12 @@ void range_for_single() {
// OMP5-LABEL: range_for_collapsed
void range_for_collapsed() {
int arr[10] = {0};
-// OMP5: call void @__kmpc_for_static_init_8(%struct.ident_t* {{.+}}, i32 [[GTID%.+]], i32 34, i32* %{{.+}}, i64* %{{.+}}, i64* %{{.+}}, i64* %{{.+}}, i64 1, i64 1)
+// OMP5: call void @__kmpc_for_static_init_8(%struct.ident_t* {{.+}}, i32 [[GTID:%.+]], i32 34, i32* %{{.+}}, i64* %{{.+}}, i64* %{{.+}}, i64* %{{.+}}, i64 1, i64 1)
#pragma omp parallel for collapse(2)
for (auto &a : arr)
for (auto b : arr)
a = b;
-// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 [[GTID]])
+// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 [[GTID:%.+]])
}
#endif // OMP5