Diffstat (limited to 'include')
-rw-r--r--  include/atomic/generic-msvc.h  |  1
-rw-r--r--  include/my_atomic.h            |  9
-rw-r--r--  include/my_cpu.h               | 93
-rw-r--r--  include/mysql/service_wsrep.h  | 16
4 files changed, 108 insertions(+), 11 deletions(-)
diff --git a/include/atomic/generic-msvc.h b/include/atomic/generic-msvc.h
index 754f0bfa8b4..c812aebae90 100644
--- a/include/atomic/generic-msvc.h
+++ b/include/atomic/generic-msvc.h
@@ -120,7 +120,6 @@ static __inline int my_yield_processor()
return 1;
}
-#define LF_BACKOFF my_yield_processor()
#else /* cleanup */
#undef IL_EXCHG_ADD32
diff --git a/include/my_atomic.h b/include/my_atomic.h
index aa5c617e593..d6aee503dbe 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -346,15 +346,6 @@ make_atomic_store(ptr)
#undef make_atomic_fas_body
#undef intptr
-/*
- the macro below defines (as an expression) the code that
- will be run in spin-loops. Intel manuals recummend to have PAUSE there.
- It is expected to be defined in include/atomic/ *.h files
-*/
-#ifndef LF_BACKOFF
-#define LF_BACKOFF (1)
-#endif
-
#define MY_ATOMIC_OK 0
#define MY_ATOMIC_NOT_1CPU 1
extern int my_atomic_initialize();
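The block removed above defined LF_BACKOFF as an expression precisely so that lock-free retry loops could invoke it unconditionally between failed CAS attempts. A minimal sketch of that usage pattern, assuming the my_atomic_cas32() wrapper declared in this header and a hypothetical slot variable (illustrative only, not part of the patch):

/* Spin until the slot is claimed, backing off between failed CAS attempts.
   int32 comes from my_global.h; my_atomic_cas32() returns non-zero on
   success and stores the observed value in *cmp on failure. */
static int reserve_slot(int32 volatile *slot)
{
  int32 expected= 0;
  while (!my_atomic_cas32(slot, &expected, 1))
  {
    expected= 0;          /* a failed CAS left the current value here */
    (void) LF_BACKOFF();  /* PAUSE-based backoff, now a function in my_cpu.h */
  }
  return 0;
}

With the fallback macro gone, callers are expected to include my_cpu.h, where LF_BACKOFF becomes the inline function added in the next file.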
diff --git a/include/my_cpu.h b/include/my_cpu.h
index ebabe6c7202..aff92d22131 100644
--- a/include/my_cpu.h
+++ b/include/my_cpu.h
@@ -1,3 +1,5 @@
+#ifndef MY_CPU_INCLUDED
+#define MY_CPU_INCLUDED
/* Copyright (c) 2013, MariaDB foundation Ab and SkySQL
This program is free software; you can redistribute it and/or modify
@@ -42,3 +44,94 @@
#define HMT_medium_high()
#define HMT_high()
#endif
+
+#if defined __i386__ || defined __x86_64__ || defined _WIN32
+# define HAVE_PAUSE_INSTRUCTION /* added in Intel Pentium 4 */
+#endif
+
+#ifdef _WIN32
+#elif defined HAVE_PAUSE_INSTRUCTION
+#elif defined(_ARCH_PWR8)
+#else
+# include "my_global.h"
+# include "my_atomic.h"
+#endif
+
+static inline void MY_RELAX_CPU(void)
+{
+#ifdef _WIN32
+ /*
+ On Windows, the x86 PAUSE instruction is issued by calling the
+ YieldProcessor macro defined in WinNT.h. Using YieldProcessor keeps
+ this code CPU-architecture independent.
+ */
+ YieldProcessor();
+#elif defined HAVE_PAUSE_INSTRUCTION
+ /*
+ According to the gcc info page, asm volatile means that the
+ instruction has important side-effects and must not be removed.
+ Also asm volatile may trigger a memory barrier (spilling all registers
+ to memory).
+ */
+#ifdef __SUNPRO_CC
+ asm ("pause" );
+#else
+ __asm__ __volatile__ ("pause");
+#endif
+#elif defined(_ARCH_PWR8)
+ __ppc_get_timebase();
+#else
+ int32 var, oldval = 0;
+ my_atomic_cas32_strong_explicit(&var, &oldval, 1, MY_MEMORY_ORDER_RELAXED,
+ MY_MEMORY_ORDER_RELAXED);
+#endif
+}
+
+
+#ifdef HAVE_PAUSE_INSTRUCTION
+# ifdef __cplusplus
+extern "C" {
+# endif
+extern unsigned my_cpu_relax_multiplier;
+void my_cpu_init(void);
+# ifdef __cplusplus
+}
+# endif
+#else
+# define my_cpu_relax_multiplier 200
+# define my_cpu_init() /* nothing */
+#endif
+
+/*
+ LF_BACKOFF should be used to improve performance on hyperthreaded CPUs. Intel
+ recommends using it in spin loops also on non-HT machines to reduce power
+ consumption (see e.g. http://softwarecommunity.intel.com/articles/eng/2004.htm)
+
+ Running benchmarks for spinlocks implemented with InterlockedCompareExchange
+ and YieldProcessor shows that much better performance is achieved by calling
+ YieldProcessor in a loop - that is, yielding longer. On Intel boxes, setting
+ the loop count in the range 200-300 brought the best results.
+*/
+
+static inline int LF_BACKOFF(void)
+{
+ unsigned i= my_cpu_relax_multiplier;
+ while (i--)
+ MY_RELAX_CPU();
+ return 1;
+}
+/**
+ Run a delay loop while waiting for a shared resource to be released.
+ @param delay delay duration (originally roughly microseconds on a 100 MHz Intel Pentium)
+*/
+
+static inline void ut_delay(unsigned delay)
+{
+ unsigned i= my_cpu_relax_multiplier / 4 * delay;
+ HMT_low();
+ while (i--)
+ MY_RELAX_CPU();
+ HMT_medium();
+}
+
+#endif
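The primitives added in my_cpu.h are meant for spin-wait loops: LF_BACKOFF() for short, bounded backoff and ut_delay() for longer waits under contention, with my_cpu_init() expected to be called once at startup so that my_cpu_relax_multiplier can be tuned for the CPU. A hedged, self-contained sketch of a caller (the test-and-test-and-set lock and the GCC __sync builtins below are illustrative, not part of this patch):

#include "my_cpu.h"     /* LF_BACKOFF(), ut_delay(), HMT_*() */

/* Illustrative spinlock: spin briefly with LF_BACKOFF(), then fall back
   to the longer ut_delay() loop while the lock stays contended. */
static void spin_lock(int volatile *lock_word)
{
  unsigned spins= 0;
  for (;;)
  {
    if (*lock_word == 0 && __sync_bool_compare_and_swap(lock_word, 0, 1))
      return;                       /* lock acquired */
    if (++spins < 10)
      (void) LF_BACKOFF();          /* short PAUSE-based backoff */
    else
      ut_delay(spins % 20);         /* longer delay under heavy contention */
  }
}

static void spin_unlock(int volatile *lock_word)
{
  __sync_lock_release(lock_word);   /* store 0 with release semantics */
}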
diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h
index 923ba57fcdc..1ce7ffd0991 100644
--- a/include/mysql/service_wsrep.h
+++ b/include/mysql/service_wsrep.h
@@ -97,7 +97,8 @@ extern struct wsrep_service_st {
enum wsrep_exec_mode (*wsrep_thd_exec_mode_func)(THD *thd);
const char * (*wsrep_thd_exec_mode_str_func)(THD *thd);
enum wsrep_conflict_state (*wsrep_thd_get_conflict_state_func)(MYSQL_THD);
- my_bool (*wsrep_thd_is_BF_func)(MYSQL_THD , my_bool);
+ my_bool (*wsrep_thd_is_aborting_func)(const MYSQL_THD thd);
+ my_bool (*wsrep_thd_is_BF_func)(MYSQL_THD, my_bool);
my_bool (*wsrep_thd_is_wsrep_func)(MYSQL_THD thd);
const char * (*wsrep_thd_query_func)(THD *thd);
enum wsrep_query_state (*wsrep_thd_query_state_func)(THD *thd);
@@ -111,10 +112,14 @@ extern struct wsrep_service_st {
void (*wsrep_set_load_multi_commit_func)(THD *thd, bool split);
bool (*wsrep_is_load_multi_commit_func)(THD *thd);
int (*wsrep_trx_is_aborting_func)(MYSQL_THD thd);
+ my_bool (*wsrep_thd_bf_abort_func)(MYSQL_THD bf_thd,
+ MYSQL_THD victim_thd,
+ my_bool signal);
int (*wsrep_trx_order_before_func)(MYSQL_THD, MYSQL_THD);
void (*wsrep_unlock_rollback_func)();
void (*wsrep_set_data_home_dir_func)(const char *data_dir);
my_bool (*wsrep_thd_is_applier_func)(MYSQL_THD);
+ bool (*wsrep_thd_set_wsrep_aborter_func)(MYSQL_THD bf_thd, MYSQL_THD thd);
} *wsrep_service;
#ifdef MYSQL_DYNAMIC_PLUGIN
@@ -143,6 +148,7 @@ extern struct wsrep_service_st {
#define wsrep_thd_exec_mode(T) wsrep_service->wsrep_thd_exec_mode_func(T)
#define wsrep_thd_exec_mode_str(T) wsrep_service->wsrep_thd_exec_mode_str_func(T)
#define wsrep_thd_get_conflict_state(T) wsrep_service->wsrep_thd_get_conflict_state_func(T)
+#define wsrep_thd_is_aborting(T) wsrep_service->wsrep_thd_is_aborting_func(T)
#define wsrep_thd_is_BF(T,S) wsrep_service->wsrep_thd_is_BF_func(T,S)
#define wsrep_thd_is_wsrep(T) wsrep_service->wsrep_thd_is_wsrep_func(T)
#define wsrep_thd_query(T) wsrep_service->wsrep_thd_query_func(T)
@@ -157,10 +163,12 @@ extern struct wsrep_service_st {
#define wsrep_set_load_multi_commit(T,S) wsrep_service->wsrep_set_load_multi_commit_func(T,S)
#define wsrep_is_load_multi_commit(T) wsrep_service->wsrep_is_load_multi_commit_func(T)
#define wsrep_trx_is_aborting(T) wsrep_service->wsrep_trx_is_aborting_func(T)
+#define wsrep_thd_bf_abort(T,T2,S) wsrep_service->wsrep_thd_bf_abort_func(T,T2,S)
#define wsrep_trx_order_before(T1,T2) wsrep_service->wsrep_trx_order_before_func(T1,T2)
#define wsrep_unlock_rollback() wsrep_service->wsrep_unlock_rollback_func()
#define wsrep_set_data_home_dir(A) wsrep_service->wsrep_set_data_home_dir_func(A)
#define wsrep_thd_is_applier(T) wsrep_service->wsrep_thd_is_applier_func(T)
+#define wsrep_thd_set_wsrep_aborter(T1,T2) wsrep_service->wsrep_thd_set_wsrep_aborter_func(T1,T2)
#define wsrep_debug get_wsrep_debug()
#define wsrep_log_conflicts get_wsrep_log_conflicts()
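In a MYSQL_DYNAMIC_PLUGIN build the macros above route the new calls through the wsrep_service function-pointer table; a server build uses the direct declarations further below. A rough sketch of how a caller might combine the new entry points (the THD handles and the surrounding logic are hypothetical, not taken from this patch):

/* Sketch only: brute-force abort a victim transaction from a BF thread. */
static void maybe_bf_abort(MYSQL_THD bf_thd, MYSQL_THD victim_thd)
{
  if (wsrep_thd_is_BF(bf_thd, FALSE) && !wsrep_thd_is_aborting(victim_thd))
  {
    /* Expands to wsrep_service->wsrep_thd_bf_abort_func(...) in a plugin. */
    wsrep_thd_bf_abort(bf_thd, victim_thd, /* signal */ TRUE);
  }
}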
@@ -195,6 +203,9 @@ int wsrep_is_wsrep_xid(const struct xid_t* xid);
int wsrep_on(MYSQL_THD thd);
int wsrep_thd_retry_counter(THD *thd);
int wsrep_trx_is_aborting(MYSQL_THD thd);
+my_bool wsrep_thd_bf_abort(MYSQL_THD bf_thd,
+ MYSQL_THD victim_thd,
+ my_bool signal);
int wsrep_trx_order_before(MYSQL_THD thd1, MYSQL_THD thd2);
long get_wsrep_protocol_version();
long long wsrep_thd_trx_seqno(THD *thd);
@@ -205,6 +216,7 @@ my_bool get_wsrep_recovery();
my_bool get_wsrep_load_data_splitting();
my_bool get_wsrep_log_conflicts();
my_bool wsrep_aborting_thd_contains(THD *thd);
+my_bool wsrep_thd_is_aborting(const MYSQL_THD thd);
my_bool wsrep_thd_is_BF(MYSQL_THD thd, my_bool sync);
my_bool wsrep_thd_is_wsrep(MYSQL_THD thd);
struct wsrep *get_wsrep();
@@ -223,6 +235,8 @@ bool wsrep_thd_ignore_table(THD *thd);
void wsrep_unlock_rollback();
void wsrep_set_data_home_dir(const char *data_dir);
my_bool wsrep_thd_is_applier(MYSQL_THD thd);
+bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd);
+
#endif
#ifdef __cplusplus