author     Matthew Malcomson <matthew.malcomson@arm.com>  2018-09-26 09:13:18 +0000
committer  Matthew Malcomson <matmal01@gcc.gnu.org>       2018-09-26 09:13:18 +0000
commit     5e14418b07376add230c52ac4c53efd65fd3b8c9 (patch)
tree       5aee864fecc30e58c335c1ed93ad335c982d8301
parent     539866048e546c696afbb49d4366fe6477f50130 (diff)
download   gcc-5e14418b07376add230c52ac4c53efd65fd3b8c9.tar.gz
A few places in the arm and aarch64 backends check whether an atomic
operation needs acquire or release semantics.  This is generally done
with a check like

  (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))

In this patch we introduce two helper functions to make things a little
tidier.

There are a few places in the arm/ backend that check whether an
operation needs memory model semantics with an idiom that can now be
replaced with the new aarch_mm_needs_* functions, so we make that
replacement.

There is also some backslash removal to make things a little tidier.

Full bootstrap and regression test plus cross-compilation regression
tests done on arm-none-linux-gnueabihf.

Ok for trunk?

gcc/ChangeLog:

2018-09-20  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/arm/arm.c (arm_split_compare_and_swap,
	arm_split_atomic_op): Use new helper functions.
	* config/arm/sync.md (atomic_load<mode>, atomic_store<mode>):
	Use new helper functions.
	* config/arm/aarch-common-protos.h (aarch_mm_needs_acquire,
	aarch_mm_needs_release): New declarations.
	* config/arm/aarch-common.c (aarch_mm_needs_acquire,
	aarch_mm_needs_release): New.

From-SVN: r264598
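For illustration only (this sketch is not part of the commit), the
refactoring amounts to the following before/after at each call site;
"model" and "model_rtx" are the operand names used in
arm_split_atomic_op in the diff below:

  /* Before: the memory-model check open-coded at the call site.  */
  bool use_acquire = TARGET_HAVE_LDACQ
                     && !(is_mm_relaxed (model) || is_mm_consume (model)
                          || is_mm_release (model));

  /* After: the same condition via the new helper, which also takes
     care of extracting the memmodel from the CONST_INT rtx operand.  */
  bool use_acquire = TARGET_HAVE_LDACQ && aarch_mm_needs_acquire (model_rtx);

In other words, aarch_mm_needs_acquire returns true for every model
except relaxed, consume and release, and aarch_mm_needs_release returns
true for every model except relaxed, consume and acquire.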
-rw-r--r--  gcc/ChangeLog                          11
-rw-r--r--  gcc/config/arm/aarch-common-protos.h    2
-rw-r--r--  gcc/config/arm/aarch-common.c          23
-rw-r--r--  gcc/config/arm/arm.c                   23
-rw-r--r--  gcc/config/arm/sync.md                 22
5 files changed, 53 insertions, 28 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 4f606d35373..2eeacefe612 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2018-09-26 Matthew Malcomson <matthew.malcomson@arm.com>
+
+ * config/arm/arm.c (arm_split_compare_and_swap, arm_split_atomic_op):
+ Use new helper functions.
+ * config/arm/sync.md (atomic_load<mode>, atomic_store<mode>):
+ Use new helper functions.
+ * config/arm/aarch-common-protos.h (aarch_mm_needs_acquire,
+ aarch_mm_needs_release): New declarations.
+ * config/arm/aarch-common.c (aarch_mm_needs_acquire,
+ aarch_mm_needs_release): New.
+
2018-09-26  Eric Botcazou  <ebotcazou@adacore.com>

	* config/arm/arm.c (arm_reorg): Skip Thumb reorg pass for thunks.
diff --git a/gcc/config/arm/aarch-common-protos.h b/gcc/config/arm/aarch-common-protos.h
index 6204482bbb9..b9a9b0438f6 100644
--- a/gcc/config/arm/aarch-common-protos.h
+++ b/gcc/config/arm/aarch-common-protos.h
@@ -28,6 +28,8 @@ extern int aarch_crypto_can_dual_issue (rtx_insn *, rtx_insn *);
extern bool aarch_rev16_p (rtx);
extern bool aarch_rev16_shleft_mask_imm_p (rtx, machine_mode);
extern bool aarch_rev16_shright_mask_imm_p (rtx, machine_mode);
+extern bool aarch_mm_needs_acquire (rtx);
+extern bool aarch_mm_needs_release (rtx);
extern int arm_early_load_addr_dep (rtx, rtx);
extern int arm_early_load_addr_dep_ptr (rtx, rtx);
extern int arm_early_store_addr_dep (rtx, rtx);
diff --git a/gcc/config/arm/aarch-common.c b/gcc/config/arm/aarch-common.c
index f0675a339f9..14eb4957548 100644
--- a/gcc/config/arm/aarch-common.c
+++ b/gcc/config/arm/aarch-common.c
@@ -29,6 +29,7 @@
#include "tm.h"
#include "rtl.h"
#include "rtl-iter.h"
+#include "memmodel.h"
/* In ARMv8-A there's a general expectation that AESE/AESMC
and AESD/AESIMC sequences of the form:
@@ -230,6 +231,28 @@ aarch_rev16_p (rtx x)
return is_rev;
}
+/* Return non-zero if the RTX representing a memory model is a memory model
+ that needs acquire semantics. */
+bool
+aarch_mm_needs_acquire (rtx const_int)
+{
+ enum memmodel model = memmodel_from_int (INTVAL (const_int));
+ return !(is_mm_relaxed (model)
+ || is_mm_consume (model)
+ || is_mm_release (model));
+}
+
+/* Return non-zero if the RTX representing a memory model is a memory model
+ that needs release semantics. */
+bool
+aarch_mm_needs_release (rtx const_int)
+{
+ enum memmodel model = memmodel_from_int (INTVAL (const_int));
+ return !(is_mm_relaxed (model)
+ || is_mm_consume (model)
+ || is_mm_acquire (model));
+}
+
/* Return nonzero if the CONSUMER instruction (a load) does need
PRODUCER's value to calculate the address. */
int
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index c6e3f7954fb..8810df53aa3 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -28645,7 +28645,7 @@ arm_expand_compare_and_swap (rtx operands[])
void
arm_split_compare_and_swap (rtx operands[])
{
- rtx rval, mem, oldval, newval, neg_bval;
+ rtx rval, mem, oldval, newval, neg_bval, mod_s_rtx;
machine_mode mode;
enum memmodel mod_s, mod_f;
bool is_weak;
@@ -28657,20 +28657,16 @@ arm_split_compare_and_swap (rtx operands[])
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
- mod_s = memmodel_from_int (INTVAL (operands[6]));
+ mod_s_rtx = operands[6];
+ mod_s = memmodel_from_int (INTVAL (mod_s_rtx));
mod_f = memmodel_from_int (INTVAL (operands[7]));
neg_bval = TARGET_THUMB1 ? operands[0] : operands[8];
mode = GET_MODE (mem);
bool is_armv8_sync = arm_arch8 && is_mm_sync (mod_s);
- bool use_acquire = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
- || is_mm_release (mod_s));
-
- bool use_release = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
- || is_mm_acquire (mod_s));
+ bool use_acquire = TARGET_HAVE_LDACQ && aarch_mm_needs_acquire (mod_s_rtx);
+ bool use_release = TARGET_HAVE_LDACQ && aarch_mm_needs_release (mod_s_rtx);
/* For ARMv8, the load-acquire is too weak for __sync memory orders. Instead,
a full barrier is emitted after the store-release. */
@@ -28765,13 +28761,8 @@ arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
bool is_armv8_sync = arm_arch8 && is_mm_sync (model);
- bool use_acquire = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (model) || is_mm_consume (model)
- || is_mm_release (model));
-
- bool use_release = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (model) || is_mm_consume (model)
- || is_mm_acquire (model));
+ bool use_acquire = TARGET_HAVE_LDACQ && aarch_mm_needs_acquire (model_rtx);
+ bool use_release = TARGET_HAVE_LDACQ && aarch_mm_needs_release (model_rtx);
/* For ARMv8, a load-acquire is too weak for __sync memory orders. Instead,
a full barrier is emitted after the store-release. */
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 7141c76bef5..08fc5d1a8ab 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -70,20 +70,19 @@
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
- if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
+ if (aarch_mm_needs_acquire (operands[2]))
{
if (TARGET_THUMB1)
- return \"ldr<sync_sfx>\\t%0, %1\";
+ return "lda<sync_sfx>\t%0, %1";
else
- return \"ldr<sync_sfx>%?\\t%0, %1\";
+ return "lda<sync_sfx>%?\t%0, %1";
}
else
{
if (TARGET_THUMB1)
- return \"lda<sync_sfx>\\t%0, %1\";
+ return "ldr<sync_sfx>\t%0, %1";
else
- return \"lda<sync_sfx>%?\\t%0, %1\";
+ return "ldr<sync_sfx>%?\t%0, %1";
}
}
[(set_attr "arch" "32,v8mb,any")
@@ -97,20 +96,19 @@
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
- if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
+ if (aarch_mm_needs_release (operands[2]))
{
if (TARGET_THUMB1)
- return \"str<sync_sfx>\t%1, %0\";
+ return "stl<sync_sfx>\t%1, %0";
else
- return \"str<sync_sfx>%?\t%1, %0\";
+ return "stl<sync_sfx>%?\t%1, %0";
}
else
{
if (TARGET_THUMB1)
- return \"stl<sync_sfx>\t%1, %0\";
+ return "str<sync_sfx>\t%1, %0";
else
- return \"stl<sync_sfx>%?\t%1, %0\";
+ return "str<sync_sfx>%?\t%1, %0";
}
}
[(set_attr "arch" "32,v8mb,any")
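
As a usage illustration (again not part of the commit), a hypothetical
translation unit such as the following would exercise the
atomic_load<mode> and atomic_store<mode> patterns above on an ARMv8
target where TARGET_HAVE_LDACQ holds:

  #include <stdatomic.h>

  /* aarch_mm_needs_acquire is false for a relaxed model, so this load
     is expected to come out as a plain ldr.  */
  int
  load_relaxed (atomic_int *p)
  {
    return atomic_load_explicit (p, memory_order_relaxed);
  }

  /* aarch_mm_needs_acquire is true for an acquire model, so this load
     is expected to come out as lda.  */
  int
  load_acquire (atomic_int *p)
  {
    return atomic_load_explicit (p, memory_order_acquire);
  }

  /* aarch_mm_needs_release is true for a release model, so this store
     is expected to come out as stl.  */
  void
  store_release (atomic_int *p, int v)
  {
    atomic_store_explicit (p, v, memory_order_release);
  }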