path: root/src/include/storage/s_lock.h
author    Tom Lane <tgl@sss.pgh.pa.us>    2015-08-31 18:10:04 -0400
committer Tom Lane <tgl@sss.pgh.pa.us>    2015-08-31 18:10:04 -0400
commit    123c9d2fc1fe0a8ee676d8244198b34a5e99ea90 (patch)
tree      c1e66a666c2a035892bc1dec23b015d4c812ec02 /src/include/storage/s_lock.h
parent    049a7799dfce096923da27a9b0e4a3c7a0a47104 (diff)
Clean up icc + ia64 situation.
Some googling turned up multiple sources saying that older versions of icc do not accept gcc-compatible asm blocks on IA64, though asm does work on x86[_64]. This is apparently fixed as of icc version 12.0 or so, but that doesn't help us much; if we have to carry the extra implementation anyway, we may as well just use it for icc rather than add a compiler version test. Hence, revert commit 2c713d6ea29c91cd2cbd92fa801a61e55ea2a3c4 (though I separated the icc code from the gcc code completely, producing what seems cleaner code). Document the state of affairs more explicitly, both in s_lock.h and postgres.c, and make some cosmetic adjustments around the IA64 code in s_lock.h.
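To make the shape of the change easier to see before the hunks below: after this commit the IA64 section of s_lock.h keeps the gcc path and the icc path completely separate, rather than sharing one implementation behind a compiler-version test. The following is only an illustrative skeleton (identifiers abbreviated, bodies elided), not the literal file contents; the authoritative text is in the diff below.

#if defined(__ia64__) || defined(__ia64)        /* Intel Itanium */

#define HAS_TEST_AND_SET
typedef unsigned int slock_t;
#define TAS(lock) tas(lock)

#ifndef __INTEL_COMPILER
/* gcc: tas() is a gcc-style asm block (xchg4) */
#else
/* icc: tas() uses a compiler intrinsic, and an icc-specific
 * S_UNLOCK() is provided as well (see the hunks below) */
#endif

#endif   /* __ia64__ || __ia64 */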
Diffstat (limited to 'src/include/storage/s_lock.h')
-rw-r--r--    src/include/storage/s_lock.h    29
1 file changed, 17 insertions, 12 deletions
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 3e292caa0e..3c4fec1b7d 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -266,6 +266,10 @@ spin_delay(void)
* any explicit statement on that in the gcc manual. In Intel's compiler,
* the -m[no-]serialize-volatile option controls that, and testing shows that
* it is enabled by default.
+ *
+ * While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
+ * (at least not in icc versions before 12.x). So we have to carry a separate
+ * compiler-intrinsic-based implementation for it.
*/
#define HAS_TEST_AND_SET
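For reference, the gcc-style asm that pre-12.x icc cannot digest on IA64 is the tas() in the gcc branch of this section. Shown here only as context for the icc hunk that follows (a hedged, lightly simplified reproduction, not a quote of the patched file): it boils down to an xchg4, which has acquire semantics on Itanium, so no extra fence is needed on the lock-acquire side.

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	/* atomic exchange of the lock word with 1; returns the old value */
	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}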
@@ -303,6 +307,10 @@ tas(volatile slock_t *lock)
return ret;
}
+/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
+#define S_UNLOCK(lock) \
+ do { __memory_barrier(); *(lock) = 0; } while (0)
+
#endif /* __INTEL_COMPILER */
#endif /* __ia64__ || __ia64 */
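The hunk above shows only the tail of the icc tas() plus the newly added S_UNLOCK(). For context, the intrinsic-based branch it belongs to looks essentially like the sketch below (a hedged reconstruction, not a quote of the patched file). _InterlockedExchange is icc's atomic-exchange intrinsic, and __memory_barrier() is a compiler-level fence that emits no instruction; hardware ordering of the releasing store comes from icc's default -mserialize-volatile behavior discussed in the comment above.

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int		ret;

	ret = _InterlockedExchange(lock, 1);	/* atomic xchg via compiler intrinsic */
	return ret;
}

/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
#define S_UNLOCK(lock) \
	do { __memory_barrier(); *(lock) = 0; } while (0)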
@@ -671,22 +679,19 @@ typedef unsigned char slock_t;
#endif
/*
- * Note that this implementation is unsafe for any platform that can speculate
+ * Default implementation of S_UNLOCK() for gcc/icc.
+ *
+ * Note that this implementation is unsafe for any platform that can reorder
* a memory access (either load or store) after a following store. That
- * happens not to be possible x86 and most legacy architectures (some are
+ * happens not to be possible on x86 and most legacy architectures (some are
* single-processor!), but many modern systems have weaker memory ordering.
- * Those that do must define their own version S_UNLOCK() rather than relying
- * on this one.
+ * Those that do must define their own version of S_UNLOCK() rather than
+ * relying on this one.
*/
#if !defined(S_UNLOCK)
-#if defined(__INTEL_COMPILER)
-#define S_UNLOCK(lock) \
- do { __memory_barrier(); *(lock) = 0; } while (0)
-#else
#define S_UNLOCK(lock) \
do { __asm__ __volatile__("" : : : "memory"); *(lock) = 0; } while (0)
#endif
-#endif
#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */
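To illustrate why the empty asm with a "memory" clobber matters (a hypothetical usage sketch, not part of the patch): the data protected by a spinlock is ordinarily not volatile-qualified, so without the clobber the compiler would be free to sink the protected store past the volatile store that clears the lock, making it visible to another process only after that process could already have acquired the lock.

static slock_t mutex;	/* hypothetical lock; assume S_INIT_LOCK(&mutex) ran earlier */
static int	counter;	/* hypothetical protected data, deliberately not volatile */

static void
bump_counter(void)
{
	S_LOCK(&mutex);		/* spin until the lock is acquired */
	counter++;			/* must complete before the lock is released */
	S_UNLOCK(&mutex);	/* compiler barrier, then *(lock) = 0 */
}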
@@ -793,7 +798,7 @@ tas(volatile slock_t *lock)
#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
- * HP-UX on Itanium, non-gcc compiler
+ * HP-UX on Itanium, non-gcc/icc compiler
*
* We assume that the compiler enforces strict ordering of loads/stores on
* volatile data (see comments on the gcc-version earlier in this file).
@@ -816,7 +821,7 @@ typedef unsigned int slock_t;
#define S_UNLOCK(lock) \
do { _Asm_mf(); (*(lock)) = 0; } while (0)
-#endif /* HPUX on IA64, non gcc */
+#endif /* HPUX on IA64, non gcc/icc */
#if defined(_AIX) /* AIX */
/*
@@ -935,7 +940,7 @@ extern int tas_sema(volatile slock_t *lock);
#if !defined(S_UNLOCK)
/*
* Our default implementation of S_UNLOCK is essentially *(lock) = 0. This
- * is unsafe if the platform can speculate a memory access (either load or
+ * is unsafe if the platform can reorder a memory access (either load or
* store) after a following store; platforms where this is possible must
* define their own S_UNLOCK. But CPU reordering is not the only concern:
* if we simply defined S_UNLOCK() as an inline macro, the compiler might