author    Jan Lindström <jan.lindstrom@mariadb.com>    2021-09-06 15:34:54 +0300
committer GitHub <noreply@github.com>                  2021-09-06 15:34:54 +0300
commit    19be74d2169b7fb3dc06feae8d11389f97644273 (patch)
tree      1435d788f48562a2f3236035fe66b2d1f2881d57
parent    b921d29b8905e927f7376c5118088368f946ae41 (diff)
parent    d8943b0cc3ecb10dc68542e7846f9411a35f0d69 (diff)
download  mariadb-git-bb-10.6-ramesh.tar.gz

Merge branch '10.6' into bb-10.6-ramesh
-rw-r--r--  CMakeLists.txt                              |  59
-rw-r--r--  appveyor.yml                                |  29
-rw-r--r--  cmake/os/Windows.cmake                      |  12
-rw-r--r--  mysql-test/main/mysql_client_test.test      |   2
-rw-r--r--  storage/innobase/btr/btr0cur.cc             |   6
-rw-r--r--  storage/innobase/btr/btr0sea.cc             |  12
-rw-r--r--  storage/innobase/buf/buf0buf.cc             |   4
-rw-r--r--  storage/innobase/include/btr0cur.h          |   4
-rw-r--r--  storage/innobase/include/btr0pcur.h         |   2
-rw-r--r--  storage/innobase/include/btr0pcur.ic        |   4
-rw-r--r--  storage/innobase/include/btr0sea.h          |  16
-rw-r--r--  storage/innobase/include/dict0mem.h         |   4
-rw-r--r--  storage/innobase/include/lock0lock.h        |   4
-rw-r--r--  storage/innobase/include/rw_lock.h          |   4
-rw-r--r--  storage/innobase/include/srw_lock.h         |  59
-rw-r--r--  storage/innobase/include/sux_lock.h         |  44
-rw-r--r--  storage/innobase/include/trx0purge.h        |   2
-rw-r--r--  storage/innobase/include/trx0rseg.h         |   2
-rw-r--r--  storage/innobase/include/trx0trx.h          |   2
-rw-r--r--  storage/innobase/row/row0ins.cc             |   7
-rw-r--r--  storage/innobase/row/row0sel.cc             |   2
-rw-r--r--  storage/innobase/sync/srw_lock.cc           | 218
-rw-r--r--  storage/innobase/trx/trx0trx.cc             |   2
-rw-r--r--  storage/innobase/unittest/innodb_sync-t.cc  |   4
-rw-r--r--  tests/mysql_client_test.c                   |   3
-rw-r--r--  win/appveyor_skip_tests.txt                 |  14
26 files changed, 352 insertions(+), 169 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b8b3f5c7d12..5301dc563af 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,22 +33,11 @@ PROJECT(MySQL)
# in RPM's:
#set(CPACK_RPM_SPEC_MORE_DEFINE "%define __spec_install_post /bin/true")
-
-IF(POLICY CMP0022)
- CMAKE_POLICY(SET CMP0022 NEW)
-ENDIF()
-IF(POLICY CMP0048)
- CMAKE_POLICY(SET CMP0048 NEW)
-ENDIF()
-IF(POLICY CMP0054)
- CMAKE_POLICY(SET CMP0054 NEW)
-ENDIF()
-IF(POLICY CMP0075)
- CMAKE_POLICY(SET CMP0075 NEW)
-ENDIF()
-IF(POLICY CMP0069)
- CMAKE_POLICY(SET CMP0069 NEW)
-ENDIF()
+FOREACH(p CMP0022 CMP0046 CMP0040 CMP0048 CMP0054 CMP0075 CMP0069)
+ IF(POLICY ${p})
+ CMAKE_POLICY(SET ${p} NEW)
+ ENDIF()
+ENDFOREACH()
MESSAGE(STATUS "Running cmake version ${CMAKE_VERSION}")
@@ -563,3 +552,41 @@ IF(NON_DISTRIBUTABLE_WARNING)
MESSAGE(WARNING "
You have linked MariaDB with ${NON_DISTRIBUTABLE_WARNING} libraries! You may not distribute the resulting binary. If you do, you will put yourself into a legal problem with the Free Software Foundation.")
ENDIF()
+
+IF(NOT WITHOUT_SERVER)
+ # Define target for minimal mtr-testable build
+ ADD_CUSTOM_TARGET(minbuild)
+ ADD_DEPENDENCIES(minbuild
+ aria_chk
+ aria_pack
+ mariadb
+ mariadb-admin
+ mariadb-binlog
+ mariadb-check
+ mariadb-client-test
+ mariadb-conv
+ mariadb-dump
+ mariadb-import
+ mariadb-plugin
+ mariadb-show
+ mariadb-slap
+ mariadb-test
+ mariadb-tzinfo-to-sql
+ mariadb-upgrade
+ mariadbd
+ my_print_defaults
+ my_safe_process
+ myisam_ftdump
+ myisamchk
+ myisamlog
+ myisampack
+ perror
+ replace)
+ IF(WIN32)
+ ADD_DEPENDENCIES(minbuild echo mariadb-install-db my_safe_kill)
+ ENDIF()
+ ADD_CUSTOM_TARGET(smoketest
+ COMMAND perl ./mysql-test-run.pl main.1st
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/mysql-test)
+ ADD_DEPENDENCIES(smoketest minbuild)
+ENDIF()
diff --git a/appveyor.yml b/appveyor.yml
index 355c7f5aeeb..c4abddf6665 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,21 +1,24 @@
version: build-{build}~branch-{branch}
-before_build:
- - md %APPVEYOR_BUILD_FOLDER%\win_build
- - cd %APPVEYOR_BUILD_FOLDER%\win_build
- - cmake .. -DWITH_UNIT_TESTS=0 -DWITH_MARIABACKUP=0 -DMYSQL_MAINTAINER_MODE=ERR -DPLUGIN_ROCKSDB=NO -DPLUGIN_CONNECT=NO -DBISON_EXECUTABLE=C:\cygwin64\bin\bison
+clone_depth: 1
-build:
- project: win_build\MySQL.sln
- parallel: true
- verbosity: minimal
-
-configuration: RelWithDebInfo
-platform: x64
+build_script:
+ # dump some system info
+ - echo processor='%PROCESSOR_IDENTIFIER%' , processor count= %NUMBER_OF_PROCESSORS%
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - mkdir _build
+ - cd _build
+ - set BUILD_TYPE=MinSizeRel
+ - set GENERATOR=-GNinja
+ - call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat"
+ - cmake -E time cmake %GENERATOR% .. -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -DMYSQL_MAINTAINER_MODE=ERR -DFAST_BUILD=1 -DBISON_EXECUTABLE=C:\cygwin64\bin\bison -DPLUGIN_PERFSCHEMA=NO -DPLUGIN_FEEDBACK=NO
+ - set /A jobs=2*%NUMBER_OF_PROCESSORS%
+ - cmake -E time cmake --build . -j %jobs% --config %BUILD_TYPE% --target minbuild
test_script:
- set PATH=C:\Strawberry\perl\bin;%PATH%;C:\Program Files (x86)\Windows Kits\10\Debuggers\x64
- - cd %APPVEYOR_BUILD_FOLDER%\win_build\mysql-test
- - perl mysql-test-run.pl --force --max-test-fail=10 --parallel=4 --testcase-timeout=10 --suite=main
+ - cd %APPVEYOR_BUILD_FOLDER%\_build\mysql-test
+ - set /A parallel=4*%NUMBER_OF_PROCESSORS%
+ - perl mysql-test-run.pl --force --max-test-fail=10 --retry=2 -parallel=%parallel% --testcase-timeout=3 --suite=main --skip-test-list=%APPVEYOR_BUILD_FOLDER%\win\appveyor_skip_tests.txt --mysqld=--loose-innodb-flush-log-at-trx-commit=2
image: Visual Studio 2019
diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake
index 232560ff7b1..d9a0dfba46a 100644
--- a/cmake/os/Windows.cmake
+++ b/cmake/os/Windows.cmake
@@ -116,7 +116,7 @@ ENDMACRO()
IF(MSVC)
IF(MSVC_VERSION LESS 1920)
- MESSAGE(FATAL_ERROR "Visual Studio q2019 or later is required")
+ MESSAGE(FATAL_ERROR "Visual Studio 2019 or later is required")
ENDIF()
# Disable mingw based pkg-config found in Strawberry perl
SET(PKG_CONFIG_EXECUTABLE 0 CACHE INTERNAL "")
@@ -254,12 +254,12 @@ IF(MSVC)
ENDFOREACH()
ENDFOREACH()
ENDIF()
- IF(MSVC_VERSION LESS 1910)
- # Noisy warning C4800: 'type': forcing value to bool 'true' or 'false' (performance warning),
- # removed in VS2017
- STRING(APPEND CMAKE_CXX_FLAGS " /wd4800")
+
+ IF(FAST_BUILD)
+ STRING (REGEX REPLACE "/RTC(su|[1su])" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
ELSEIF (NOT CLANG_CL)
- STRING(APPEND CMAKE_CXX_FLAGS " /d2OptimizeHugeFunctions")
+ STRING(APPEND CMAKE_CXX_FLAGS_RELEASE " /d2OptimizeHugeFunctions")
+ STRING(APPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO " /d2OptimizeHugeFunctions")
ENDIF()
ENDIF()
diff --git a/mysql-test/main/mysql_client_test.test b/mysql-test/main/mysql_client_test.test
index 9fb7bcd81c9..bcb246a8a06 100644
--- a/mysql-test/main/mysql_client_test.test
+++ b/mysql-test/main/mysql_client_test.test
@@ -1,7 +1,5 @@
# This test should work in embedded server after we fix mysqltest
-- source include/not_embedded.inc
-# need to have the dynamic loading turned on for the client plugin tests
---source include/have_plugin_auth.inc
# Run test with default character set
--source include/default_charset.inc
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 3a041ba11db..e533f93b199 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1247,7 +1247,7 @@ btr_cur_search_to_nth_level_func(
btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
s- or x-latched, but see also above! */
#ifdef BTR_CUR_HASH_ADAPT
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
/*!< in: currently held AHI rdlock, or NULL */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr, /*!< in: mtr */
@@ -3611,7 +3611,7 @@ fail_err:
ut_ad(flags == BTR_NO_LOCKING_FLAG);
} else if (index->table->is_temporary()) {
} else {
- srw_lock* ahi_latch = btr_search_sys.get_latch(*index);
+ srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
if (!reorg && cursor->flag == BTR_CUR_HASH) {
btr_search_update_hash_node_on_insert(
cursor, ahi_latch);
@@ -4331,7 +4331,7 @@ btr_cur_update_in_place(
#ifdef BTR_CUR_HASH_ADAPT
{
- srw_lock* ahi_latch = block->index
+ srw_spin_lock* ahi_latch = block->index
? btr_search_sys.get_latch(*index) : NULL;
if (ahi_latch) {
/* TO DO: Can we skip this if none of the fields
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 6fb4a01951a..b95fbbe694a 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -1015,7 +1015,7 @@ btr_search_guess_on_hash(
ulint mode,
ulint latch_mode,
btr_cur_t* cursor,
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
mtr_t* mtr)
{
ulint fold;
@@ -1460,7 +1460,7 @@ void
btr_search_build_page_hash_index(
dict_index_t* index,
buf_block_t* block,
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
uint16_t n_fields,
uint16_t n_bytes,
bool left_side)
@@ -1660,7 +1660,7 @@ exit_func:
@param[in,out] cursor cursor which was just positioned */
void btr_search_info_update_slow(btr_search_t *info, btr_cur_t *cursor)
{
- srw_lock* ahi_latch = &btr_search_sys.get_part(*cursor->index)
+ srw_spin_lock* ahi_latch = &btr_search_sys.get_part(*cursor->index)
->latch;
buf_block_t* block = btr_cur_get_block(cursor);
@@ -1727,7 +1727,7 @@ btr_search_move_or_delete_hash_entries(
assert_block_ahi_valid(block);
assert_block_ahi_valid(new_block);
- srw_lock* ahi_latch = index
+ srw_spin_lock* ahi_latch = index
? &btr_search_sys.get_part(*index)->latch
: nullptr;
@@ -1852,7 +1852,7 @@ void btr_search_update_hash_on_delete(btr_cur_t *cursor)
inserted next to the cursor.
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
- srw_lock *ahi_latch)
+ srw_spin_lock *ahi_latch)
{
buf_block_t* block;
dict_index_t* index;
@@ -1925,7 +1925,7 @@ func_exit:
to the cursor
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_on_insert(btr_cur_t *cursor,
- srw_lock *ahi_latch)
+ srw_spin_lock *ahi_latch)
{
buf_block_t* block;
dict_index_t* index;
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 58820c7b906..426b21a8308 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -282,7 +282,7 @@ void page_hash_latch::read_lock_wait()
/* First, try busy spinning for a while. */
for (auto spin= srv_n_spin_wait_rounds; spin--; )
{
- ut_delay(srv_spin_wait_delay);
+ LF_BACKOFF();
if (read_trylock())
return;
}
@@ -301,7 +301,7 @@ void page_hash_latch::write_lock_wait()
{
if (write_lock_poll())
return;
- ut_delay(srv_spin_wait_delay);
+ LF_BACKOFF();
}
/* Fall back to yielding to other threads. */
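The two buf0buf.cc hunks above replace ut_delay(srv_spin_wait_delay) with LF_BACKOFF() inside the page_hash_latch spin loops, so each busy-wait round now issues only a cheap CPU pause hint before the code falls back to yielding. A minimal, self-contained sketch of that spin-then-yield shape (not the actual page_hash_latch; the latch layout, the round count and the pause macro are assumptions):

#include <atomic>
#include <thread>
#if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)
# include <immintrin.h>
# define CPU_RELAX() _mm_pause()        /* plays the role of LF_BACKOFF() */
#else
# define CPU_RELAX() ((void) 0)         /* no pause hint on this target */
#endif

struct spin_latch
{
  std::atomic<bool> locked{false};

  bool try_lock()
  { return !locked.exchange(true, std::memory_order_acquire); }

  void lock(unsigned spin_rounds= 30)
  {
    /* First, try busy spinning for a while. */
    for (unsigned spin= spin_rounds; spin--; )
    {
      CPU_RELAX();
      if (try_lock())
        return;
    }
    /* Fall back to yielding to other threads. */
    while (!try_lock())
      std::this_thread::yield();
  }

  void unlock() { locked.store(false, std::memory_order_release); }
};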
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 6668fb6ca67..c7f25aff4b7 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -201,7 +201,7 @@ btr_cur_search_to_nth_level_func(
btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
s- or x-latched, but see also above! */
#ifdef BTR_CUR_HASH_ADAPT
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
/*!< in: currently held AHI rdlock, or NULL */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr, /*!< in/out: mini-transaction */
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index 312e51971bb..62f82632c62 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -135,7 +135,7 @@ btr_pcur_open_with_no_init_func(
that the ahi_latch protects the record! */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
#ifdef BTR_CUR_HASH_ADAPT
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
/*!< in: currently held AHI rdlock, or NULL */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr); /*!< in: mtr */
diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic
index 5840c34eecb..3853db88a8e 100644
--- a/storage/innobase/include/btr0pcur.ic
+++ b/storage/innobase/include/btr0pcur.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -479,7 +479,7 @@ btr_pcur_open_with_no_init_func(
that the ahi_latch protects the record! */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
#ifdef BTR_CUR_HASH_ADAPT
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
/*!< in: currently held AHI rdlock, or NULL */
#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in: mtr */
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 31ca4ba53dd..4339c895400 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -81,7 +81,7 @@ btr_search_guess_on_hash(
ulint mode,
ulint latch_mode,
btr_cur_t* cursor,
- srw_lock* ahi_latch,
+ srw_spin_lock* ahi_latch,
mtr_t* mtr);
/** Move or delete hash entries for moved records, usually in a page split.
@@ -114,7 +114,7 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id);
inserted next to the cursor.
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
- srw_lock *ahi_latch);
+ srw_spin_lock *ahi_latch);
/** Updates the page hash index when a single record is inserted on a page.
@param[in,out] cursor cursor which was positioned to the
@@ -123,7 +123,7 @@ void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
to the cursor
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_on_insert(btr_cur_t *cursor,
- srw_lock *ahi_latch);
+ srw_spin_lock *ahi_latch);
/** Updates the page hash index when a single record is deleted from a page.
@param[in] cursor cursor which was positioned on the record to delete
@@ -237,7 +237,7 @@ struct btr_search_sys_t
struct partition
{
/** latches protecting hash_table */
- srw_lock latch;
+ srw_spin_lock latch;
/** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
hash_table_t table;
/** memory heap for table */
@@ -249,8 +249,8 @@ struct btr_search_sys_t
#pragma warning(disable : 4200)
#endif
- char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(srw_lock) -
- sizeof(hash_table_t) - sizeof(mem_heap_t)) &
+ char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
+ sizeof table - sizeof heap) &
(CPU_LEVEL1_DCACHE_LINESIZE - 1)];
#ifdef _MSC_VER
@@ -306,7 +306,7 @@ struct btr_search_sys_t
}
/** Get the search latch for the adaptive hash index partition */
- srw_lock *get_latch(const dict_index_t &index) const
+ srw_spin_lock *get_latch(const dict_index_t &index) const
{ return &get_part(index)->latch; }
/** Create and initialize at startup */
@@ -351,7 +351,7 @@ inline ulint dict_index_t::n_ahi_pages() const
{
if (!btr_search_enabled)
return 0;
- srw_lock *latch= &btr_search_sys.get_part(*this)->latch;
+ srw_spin_lock *latch= &btr_search_sys.get_part(*this)->latch;
latch->rd_lock(SRW_LOCK_CALL);
ulint ref_count= search_info->ref_count;
latch->rd_unlock();
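In the btr_search_sys_t::partition hunk above, the padding array is now sized from sizeof latch, sizeof table and sizeof heap instead of hard-coded type names, so the partition stays cache-line sized even after the latch type change. A stand-alone sketch of the same masking idiom, assuming a 64-byte line:

#include <cstddef>

static constexpr std::size_t CACHE_LINE= 64;    /* assumed cache line size */

struct padded_slot
{
  unsigned long counter;                        /* the payload */
  /* (CACHE_LINE - sizeof counter) & (CACHE_LINE - 1) bytes of padding, so
     the struct fills whole cache lines and adjacent slots do not false-share. */
  char pad[(CACHE_LINE - sizeof counter) & (CACHE_LINE - 1)];
};

static_assert(sizeof(padded_slot) % CACHE_LINE == 0,
              "padded_slot fills whole cache lines");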
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 417a65f1b09..04debaa7869 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -2268,10 +2268,10 @@ public:
lock_t* autoinc_lock;
/** Mutex protecting autoinc and freed_indexes. */
- srw_mutex autoinc_mutex;
+ srw_spin_mutex autoinc_mutex;
private:
/** Mutex protecting locks on this table. */
- srw_mutex lock_mutex;
+ srw_spin_mutex lock_mutex;
#ifdef UNIV_DEBUG
/** The owner of lock_mutex (0 if none) */
Atomic_relaxed<os_thread_id_t> lock_mutex_owner{0};
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 42e8bf4ad22..859441afcc0 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -584,7 +584,7 @@ class lock_sys_t
#else
{
private:
- srw_lock_low lock;
+ srw_spin_lock_low lock;
public:
/** Try to acquire a lock */
bool try_acquire() { return lock.wr_lock_try(); }
@@ -666,7 +666,7 @@ private:
bool m_initialised;
/** mutex proteting the locks */
- MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_lock latch;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
#ifdef UNIV_DEBUG
/** The owner of exclusive latch (0 if none); protected by latch */
std::atomic<os_thread_id_t> writer{0};
diff --git a/storage/innobase/include/rw_lock.h b/storage/innobase/include/rw_lock.h
index 3f1d76d8d97..cd176f0b35b 100644
--- a/storage/innobase/include/rw_lock.h
+++ b/storage/innobase/include/rw_lock.h
@@ -124,8 +124,8 @@ protected:
}
DBUG_ASSERT((l & ~WRITER_WAITING) == UPDATER);
/* Any thread that had set WRITER_WAITING will eventually be woken
- up by ssux_lock_low::x_unlock() or ssux_lock_low::u_unlock()
- (not ssux_lock_low::wr_u_downgrade() to keep the code simple). */
+ up by ssux_lock_impl::x_unlock() or ssux_lock_impl::u_unlock()
+ (not ssux_lock_impl::wr_u_downgrade() to keep the code simple). */
return true;
}
/** Downgrade an exclusive lock to an update lock. */
diff --git a/storage/innobase/include/srw_lock.h b/storage/innobase/include/srw_lock.h
index fdac659d494..b24e0a30857 100644
--- a/storage/innobase/include/srw_lock.h
+++ b/storage/innobase/include/srw_lock.h
@@ -32,11 +32,14 @@ public:
void wr_unlock() { pthread_mutex_unlock(&lock); }
bool wr_lock_try() { return !pthread_mutex_trylock(&lock); }
};
+typedef srw_mutex srw_spin_mutex;
#else
/** Futex-based mutex */
-class srw_mutex final
+template<bool spinloop>
+class srw_mutex_impl final
{
- /** The lock word, containing HOLDER and a count of waiters */
+ /** The lock word, containing HOLDER + 1 if the lock is being held,
+ plus the number of waiters */
std::atomic<uint32_t> lock;
/** Identifies that the lock is being held */
static constexpr uint32_t HOLDER= 1U << 31;
@@ -62,7 +65,7 @@ public:
bool wr_lock_try()
{
uint32_t lk= 0;
- return lock.compare_exchange_strong(lk, HOLDER,
+ return lock.compare_exchange_strong(lk, HOLDER + 1,
std::memory_order_acquire,
std::memory_order_relaxed);
}
@@ -70,18 +73,27 @@ public:
void wr_lock() { if (!wr_lock_try()) wait_and_lock(); }
void wr_unlock()
{
- const uint32_t lk= lock.fetch_and(~HOLDER, std::memory_order_release);
- if (lk != HOLDER)
+ const uint32_t lk= lock.fetch_sub(HOLDER + 1, std::memory_order_release);
+ if (lk != HOLDER + 1)
{
DBUG_ASSERT(lk & HOLDER);
wake();
}
}
};
+
+typedef srw_mutex_impl<true> srw_spin_mutex;
+typedef srw_mutex_impl<false> srw_mutex;
+#endif
+
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# else
+template<bool spinlock> class srw_lock_impl;
#endif
/** Slim shared-update-exclusive lock with no recursion */
-class ssux_lock_low final
+template<bool spinloop>
+class ssux_lock_impl final
#ifdef SUX_LOCK_GENERIC
: private rw_lock
#endif
@@ -91,7 +103,7 @@ class ssux_lock_low final
# ifdef SUX_LOCK_GENERIC
# elif defined _WIN32
# else
- friend class srw_lock;
+ friend srw_lock_impl<spinloop>;
# endif
#endif
#ifdef SUX_LOCK_GENERIC
@@ -258,7 +270,7 @@ public:
class srw_lock_low
{
# ifdef UNIV_PFS_RWLOCK
- friend class srw_lock;
+ friend class srw_lock_impl;
# endif
SRWLOCK lock;
public:
@@ -271,12 +283,14 @@ public:
bool wr_lock_try() { return TryAcquireSRWLockExclusive(&lock); }
void wr_unlock() { ReleaseSRWLockExclusive(&lock); }
};
+
+typedef srw_lock_low srw_spin_lock_low;
#elif defined SUX_LOCK_GENERIC
/** Slim read-write lock */
class srw_lock_low
{
# ifdef UNIV_PFS_RWLOCK
- friend class srw_lock;
+ friend class srw_lock_impl;
# endif
rw_lock_t lock;
public:
@@ -289,8 +303,10 @@ public:
bool wr_lock_try() { return !rw_trywrlock(&lock); }
void wr_unlock() { rw_unlock(&lock); }
};
+typedef srw_lock_low srw_spin_lock_low;
#else
-typedef ssux_lock_low srw_lock_low;
+typedef ssux_lock_impl<false> srw_lock_low;
+typedef ssux_lock_impl<true> srw_spin_lock_low;
#endif
#ifndef UNIV_PFS_RWLOCK
@@ -298,7 +314,7 @@ typedef ssux_lock_low srw_lock_low;
# define SRW_LOCK_ARGS(file, line) /* nothing */
# define SRW_LOCK_CALL /* nothing */
typedef srw_lock_low srw_lock;
-typedef ssux_lock_low ssux_lock;
+typedef srw_spin_lock_low srw_spin_lock;
#else
# define SRW_LOCK_INIT(key) init(key)
# define SRW_LOCK_ARGS(file, line) file, line
@@ -308,7 +324,7 @@ typedef ssux_lock_low ssux_lock;
class ssux_lock
{
PSI_rwlock *pfs_psi;
- ssux_lock_low lock;
+ ssux_lock_impl<false> lock;
ATTRIBUTE_NOINLINE void psi_rd_lock(const char *file, unsigned line);
ATTRIBUTE_NOINLINE void psi_wr_lock(const char *file, unsigned line);
@@ -382,10 +398,18 @@ public:
};
/** Slim reader-writer lock with PERFORMANCE_SCHEMA instrumentation */
-class srw_lock
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# else
+template<bool spinlock>
+# endif
+class srw_lock_impl
{
PSI_rwlock *pfs_psi;
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
srw_lock_low lock;
+# else
+ ssux_lock_impl<spinlock> lock;
+# endif
ATTRIBUTE_NOINLINE void psi_rd_lock(const char *file, unsigned line);
ATTRIBUTE_NOINLINE void psi_wr_lock(const char *file, unsigned line);
@@ -433,4 +457,13 @@ public:
bool rd_lock_try() { return lock.rd_lock_try(); }
bool wr_lock_try() { return lock.wr_lock_try(); }
};
+
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+typedef srw_lock_impl srw_lock;
+typedef srw_lock_impl srw_spin_lock;
+# else
+typedef srw_lock_impl<false> srw_lock;
+typedef srw_lock_impl<true> srw_spin_lock;
+# endif
+
#endif
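The srw_mutex_impl hunks above also change the lock-word protocol: while the mutex is held the word contains HOLDER plus the holder's own count of 1, and every other interested thread adds 1, so wr_unlock() can release the lock and drop the holder's count with a single fetch_sub and only needs to wake someone when the previous value was not exactly HOLDER + 1. A simplified, self-contained sketch of that protocol (C++20 atomic wait/notify stands in for the futex, and the memory ordering is simplified):

#include <atomic>
#include <cstdint>

class tiny_mutex
{
  /* top bit: lock is held; low bits: holder (1) plus waiting threads */
  static constexpr uint32_t HOLDER= 1U << 31;
  std::atomic<uint32_t> word{0};
public:
  bool try_lock()
  {
    uint32_t lk= 0;
    /* 0 -> HOLDER + 1: acquired, with the holder's own count included */
    return word.compare_exchange_strong(lk, HOLDER + 1,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed);
  }

  void lock()
  {
    if (try_lock())
      return;
    /* register interest, then keep trying to set HOLDER */
    uint32_t lk= 1 + word.fetch_add(1, std::memory_order_relaxed);
    for (;;)
    {
      if (!(lk & HOLDER))
      {
        lk= word.fetch_or(HOLDER, std::memory_order_acquire);
        if (!(lk & HOLDER))
          return;                       /* acquired; our +1 stays in the word */
      }
      word.wait(lk);                    /* sleep until the word changes */
      lk= word.load(std::memory_order_relaxed);
    }
  }

  void unlock()
  {
    /* drop HOLDER and the holder's own count in one step */
    if (word.fetch_sub(HOLDER + 1, std::memory_order_release) != HOLDER + 1)
      word.notify_one();                /* other threads are still interested */
  }
};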
diff --git a/storage/innobase/include/sux_lock.h b/storage/innobase/include/sux_lock.h
index f967e8c81cd..c09915cf6de 100644
--- a/storage/innobase/include/sux_lock.h
+++ b/storage/innobase/include/sux_lock.h
@@ -27,12 +27,12 @@ this program; if not, write to the Free Software Foundation, Inc.,
/** A "fat" rw-lock that supports
S (shared), U (update, or shared-exclusive), and X (exclusive) modes
as well as recursive U and X latch acquisition
-@tparam srw ssux_lock_low or ssux_lock */
-template<typename srw>
+@tparam ssux ssux_lock_impl or ssux_lock */
+template<typename ssux>
class sux_lock final
{
/** The underlying non-recursive lock */
- srw lock;
+ ssux lock;
/** Numbers of U and X locks. Protected by lock. */
uint32_t recursive;
/** The owner of the U or X lock (0 if none); protected by lock */
@@ -270,20 +270,14 @@ public:
bool is_waiting() const { return lock.is_waiting(); }
};
-/** needed for dict_index_t::clone() */
-template<> inline void sux_lock<ssux_lock>::operator=(const sux_lock&)
-{
- memset((void*) this, 0, sizeof *this);
-}
-
-typedef sux_lock<ssux_lock_low> block_lock;
+typedef sux_lock<ssux_lock_impl<true>> block_lock;
#ifndef UNIV_PFS_RWLOCK
-typedef block_lock index_lock;
+typedef sux_lock<ssux_lock_impl<false>> index_lock;
#else
typedef sux_lock<ssux_lock> index_lock;
-template<> inline void sux_lock<ssux_lock_low>::init()
+template<> inline void sux_lock<ssux_lock_impl<true>>::init()
{
lock.init();
ut_ad(!writer.load(std::memory_order_relaxed));
@@ -340,8 +334,13 @@ inline void sux_lock<ssux_lock>::u_x_upgrade(const char *file, unsigned line)
}
#endif
-template<>
-inline void sux_lock<ssux_lock_low>::s_lock()
+/** needed for dict_index_t::clone() */
+template<> inline void index_lock::operator=(const sux_lock&)
+{
+ memset((void*) this, 0, sizeof *this);
+}
+
+template<typename ssux> inline void sux_lock<ssux>::s_lock()
{
ut_ad(!have_x());
ut_ad(!have_s());
@@ -349,8 +348,7 @@ inline void sux_lock<ssux_lock_low>::s_lock()
ut_d(s_lock_register());
}
-template<>
-inline void sux_lock<ssux_lock_low>::u_lock()
+template<typename ssux> inline void sux_lock<ssux>::u_lock()
{
os_thread_id_t id= os_thread_get_curr_id();
if (writer.load(std::memory_order_relaxed) == id)
@@ -364,8 +362,7 @@ inline void sux_lock<ssux_lock_low>::u_lock()
}
}
-template<>
-inline void sux_lock<ssux_lock_low>::x_lock(bool for_io)
+template<typename ssux> inline void sux_lock<ssux>::x_lock(bool for_io)
{
os_thread_id_t id= os_thread_get_curr_id();
if (writer.load(std::memory_order_relaxed) == id)
@@ -382,15 +379,14 @@ inline void sux_lock<ssux_lock_low>::x_lock(bool for_io)
}
}
-template<>
-inline void sux_lock<ssux_lock_low>::u_x_upgrade()
+template<typename ssux> inline void sux_lock<ssux>::u_x_upgrade()
{
ut_ad(have_u_not_x());
lock.u_wr_upgrade();
recursive/= RECURSIVE_U;
}
-template<> inline bool sux_lock<ssux_lock_low>::x_lock_upgraded()
+template<typename ssux> inline bool sux_lock<ssux>::x_lock_upgraded()
{
os_thread_id_t id= os_thread_get_curr_id();
if (writer.load(std::memory_order_relaxed) == id)
@@ -417,8 +413,7 @@ template<> inline bool sux_lock<ssux_lock_low>::x_lock_upgraded()
}
}
-template<>
-inline bool sux_lock<ssux_lock_low>::u_lock_try(bool for_io)
+template<typename ssux> inline bool sux_lock<ssux>::u_lock_try(bool for_io)
{
os_thread_id_t id= os_thread_get_curr_id();
if (writer.load(std::memory_order_relaxed) == id)
@@ -438,8 +433,7 @@ inline bool sux_lock<ssux_lock_low>::u_lock_try(bool for_io)
return false;
}
-template<>
-inline bool sux_lock<ssux_lock_low>::x_lock_try()
+template<typename ssux> inline bool sux_lock<ssux>::x_lock_try()
{
os_thread_id_t id= os_thread_get_curr_id();
if (writer.load(std::memory_order_relaxed) == id)
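The sux_lock.h hunks above replace the template<> bodies that existed only for sux_lock<ssux_lock_low> with ordinary template<typename ssux> member definitions, so one set of bodies now serves both the block_lock and the index_lock instantiations. A minimal illustration of that pattern change (guarded and dummy_lock are hypothetical names):

#include <iostream>

template<typename Lock>
class guarded
{
  Lock lock;
public:
  void acquire();                       /* declared for every instantiation */
};

/* Removed style: a body provided for one specialization only,
     template<> inline void guarded<some_lock>::acquire() { ... }
   Added style: one generic body, instantiated on demand for whichever
   Lock types are actually used. */
template<typename Lock>
inline void guarded<Lock>::acquire()
{
  lock.lock();                          /* assumes Lock provides lock() */
}

struct dummy_lock { void lock() { std::cout << "locked\n"; } };

int main()
{
  guarded<dummy_lock> g;
  g.acquire();
  return 0;
}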
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index c2c755e183a..417c6688e83 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -125,7 +125,7 @@ class purge_sys_t
{
public:
/** latch protecting view, m_enabled */
- MY_ALIGNED(CACHE_LINE_SIZE) mutable srw_lock latch;
+ MY_ALIGNED(CACHE_LINE_SIZE) mutable srw_spin_lock latch;
private:
/** The purge will not remove undo logs which are >= this view */
ReadViewBase view;
diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h
index 02e6f290c56..7d0147b3752 100644
--- a/storage/innobase/include/trx0rseg.h
+++ b/storage/innobase/include/trx0rseg.h
@@ -84,7 +84,7 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
/** tablespace containing the rollback segment; constant after init() */
fil_space_t *space;
/** latch protecting everything except page_no, space */
- srw_lock_low latch;
+ srw_spin_lock_low latch;
/** rollback segment header page number; constant after init() */
uint32_t page_no;
/** length of the TRX_RSEG_HISTORY list (number of transactions) */
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index d64fd019b85..343e7d42dc4 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -565,7 +565,7 @@ public:
private:
/** mutex protecting state and some of lock
(some are protected by lock_sys.latch) */
- srw_mutex mutex;
+ srw_spin_mutex mutex;
#ifdef UNIV_DEBUG
/** The owner of mutex (0 if none); protected by mutex */
std::atomic<os_thread_id_t> mutex_owner{0};
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 6f228142cba..b67c1212271 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -2501,6 +2501,13 @@ extern "C" int thd_is_slave(const MYSQL_THD thd);
# define thd_is_slave(thd) 0
#endif
+#if defined __aarch64__&&defined __GNUC__&&__GNUC__==4&&!defined __clang__
+/* Avoid GCC 4.8.5 internal compiler error due to srw_mutex::wr_unlock().
+We would only need this for row_ins_clust_index_entry_low(),
+but GCC 4.8.5 does not support pop_options. */
+# pragma GCC optimize ("no-expensive-optimizations")
+#endif
+
/***************************************************************//**
Tries to insert an entry into a clustered index, ignoring foreign key
constraints. If a record with the same unique key is found, the other
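The new row0ins.cc block above is a guarded compiler workaround: the optimization override applies only when building with the affected GCC on aarch64 and is invisible to every other compiler. The general shape, reduced to a sketch (the version test and the function are illustrative, not the InnoDB code):

/* apply the override only where the known compiler bug exists */
#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 5
# pragma GCC optimize ("no-expensive-optimizations")
#endif

int twice(int x) { return x * 2; }      /* other compilers build this normally */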
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 79da2b522d7..4a839082a6b 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -3930,7 +3930,7 @@ row_sel_try_search_shortcut_for_mysql(
ut_ad(!prebuilt->templ_contains_blob);
ut_ad(trx->read_view.is_open());
- srw_lock* ahi_latch = btr_search_sys.get_latch(*index);
+ srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
ahi_latch->rd_lock(SRW_LOCK_CALL);
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc
index b76194c89e3..4cd4b56bdd0 100644
--- a/storage/innobase/sync/srw_lock.cc
+++ b/storage/innobase/sync/srw_lock.cc
@@ -20,8 +20,24 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "srv0srv.h"
#include "my_cpu.h"
+/** @return the parameter for srw_pause() */
+static inline unsigned srw_pause_delay()
+{
+ return my_cpu_relax_multiplier / 4 * srv_spin_wait_delay;
+}
+
+/** Pause the CPU for some time, with no memory accesses. */
+static inline void srw_pause(unsigned delay)
+{
+ HMT_low();
+ while (delay--)
+ MY_RELAX_CPU();
+ HMT_medium();
+}
+
#ifdef SUX_LOCK_GENERIC
-void ssux_lock_low::init()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::init()
{
DBUG_ASSERT(!is_locked_or_waiting());
pthread_mutex_init(&mutex, nullptr);
@@ -29,7 +45,8 @@ void ssux_lock_low::init()
pthread_cond_init(&cond_exclusive, nullptr);
}
-void ssux_lock_low::destroy()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::destroy()
{
DBUG_ASSERT(!is_locked_or_waiting());
pthread_mutex_destroy(&mutex);
@@ -37,7 +54,8 @@ void ssux_lock_low::destroy()
pthread_cond_destroy(&cond_exclusive);
}
-inline void ssux_lock_low::writer_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::writer_wait(uint32_t l)
{
pthread_mutex_lock(&mutex);
while (value() == l)
@@ -45,7 +63,8 @@ inline void ssux_lock_low::writer_wait(uint32_t l)
pthread_mutex_unlock(&mutex);
}
-inline void ssux_lock_low::readers_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::readers_wait(uint32_t l)
{
pthread_mutex_lock(&mutex);
while (value() == l)
@@ -53,7 +72,8 @@ inline void ssux_lock_low::readers_wait(uint32_t l)
pthread_mutex_unlock(&mutex);
}
-inline void ssux_lock_low::wake()
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wake()
{
pthread_mutex_lock(&mutex);
uint32_t l= value();
@@ -70,7 +90,8 @@ inline void ssux_lock_low::wake()
/** Wait for a read lock.
@param lock word value from a failed read_trylock() */
-void ssux_lock_low::read_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::read_lock(uint32_t l)
{
do
{
@@ -90,15 +111,19 @@ void ssux_lock_low::read_lock(uint32_t l)
pthread_mutex_unlock(&mutex);
continue;
}
- else
+ else if (spinloop)
+ {
+ const unsigned delay= srw_pause_delay();
+
for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
{
- ut_delay(srv_spin_wait_delay);
+ srw_pause(delay);
if (read_trylock<true>(l))
return;
else if (l == WRITER_WAITING)
goto wake_writer;
}
+ }
readers_wait(l);
}
@@ -107,7 +132,8 @@ void ssux_lock_low::read_lock(uint32_t l)
/** Wait for an update lock.
@param lock word value from a failed update_trylock() */
-void ssux_lock_low::update_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::update_lock(uint32_t l)
{
do
{
@@ -127,15 +153,19 @@ void ssux_lock_low::update_lock(uint32_t l)
pthread_mutex_unlock(&mutex);
continue;
}
- else
+ else if (spinloop)
+ {
+ const unsigned delay= srw_pause_delay();
+
for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
{
- ut_delay(srv_spin_wait_delay);
+ srw_pause(delay);
if (update_trylock(l))
return;
else if ((l | UPDATER) == (UPDATER | WRITER_WAITING))
goto wake_writer;
}
+ }
readers_wait(l);
}
@@ -144,21 +174,12 @@ void ssux_lock_low::update_lock(uint32_t l)
/** Wait for a write lock after a failed write_trylock() or upgrade_trylock()
@param holding_u whether we already hold u_lock() */
-void ssux_lock_low::write_lock(bool holding_u)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::write_lock(bool holding_u)
{
for (;;)
{
uint32_t l= write_lock_wait_start();
- /* We are the first writer to be granted the lock. Spin for a while. */
- for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
- {
- l= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
- if (write_lock_wait_try(l))
- return;
- if (!(l & WRITER_WAITING))
- l= write_lock_wait_start();
- ut_delay(srv_spin_wait_delay);
- }
const uint32_t e= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
l= e;
@@ -190,21 +211,34 @@ void ssux_lock_low::write_lock(bool holding_u)
}
}
-void ssux_lock_low::rd_unlock() { if (read_unlock()) wake(); }
-void ssux_lock_low::u_unlock() { update_unlock(); wake(); }
-void ssux_lock_low::wr_unlock() { write_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_unlock() { if (read_unlock()) wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::u_unlock() { update_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_unlock() { write_unlock(); wake(); }
+
+template void ssux_lock_impl<false>::init();
+template void ssux_lock_impl<false>::destroy();
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
#else /* SUX_LOCK_GENERIC */
static_assert(4 == sizeof(rw_lock), "ABI");
# ifdef _WIN32
# include <synchapi.h>
-inline void srw_mutex::wait(uint32_t lk)
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&lock, &lk, 4, INFINITE); }
-void srw_mutex::wake() { WakeByAddressSingle(&lock); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { WakeByAddressSingle(&lock); }
-inline void ssux_lock_low::wait(uint32_t lk)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&readers, &lk, 4, INFINITE); }
-void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { WakeByAddressSingle(&readers); }
# else
# ifdef __linux__
@@ -221,49 +255,93 @@ void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
# error "no futex support"
# endif
-inline void srw_mutex::wait(uint32_t lk) { SRW_FUTEX(&lock, WAIT, lk); }
-void srw_mutex::wake() { SRW_FUTEX(&lock, WAKE, 1); }
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&lock, WAIT, lk); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { SRW_FUTEX(&lock, WAKE, 1); }
-inline void ssux_lock_low::wait(uint32_t lk) { SRW_FUTEX(&readers, WAIT, lk); }
-void ssux_lock_low::wake() { SRW_FUTEX(&readers, WAKE, 1); }
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&readers, WAIT, lk); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { SRW_FUTEX(&readers, WAKE, 1); }
# endif
+template void srw_mutex_impl<false>::wake();
+template void ssux_lock_impl<false>::wake();
+template void srw_mutex_impl<true>::wake();
+template void ssux_lock_impl<true>::wake();
-void srw_mutex::wait_and_lock()
+template<>
+void srw_mutex_impl<true>::wait_and_lock()
{
uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
- for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
+
+ const unsigned delay= srw_pause_delay();
+
+ for (auto spin= srv_n_spin_wait_rounds;;)
+ {
+ DBUG_ASSERT(~HOLDER & lk);
+ if (lk & HOLDER)
+ lk= lock.load(std::memory_order_relaxed);
+ else
+ {
+ lk= lock.fetch_or(HOLDER, std::memory_order_relaxed);
+ if (!(lk & HOLDER))
+ goto acquired;
+ }
+ srw_pause(delay);
+ if (!--spin)
+ break;
+ }
+
+ for (;; wait(lk))
{
- lk&= ~HOLDER;
- DBUG_ASSERT(lk);
- while (!lock.compare_exchange_weak(lk, HOLDER | (lk - 1),
- std::memory_order_acquire,
- std::memory_order_relaxed))
+ if (lk & HOLDER)
+ {
+ lk= lock.load(std::memory_order_relaxed);
if (lk & HOLDER)
- goto occupied;
- return;
-occupied:
- ut_delay(srv_spin_wait_delay);
+ continue;
+ }
+ lk= lock.fetch_or(HOLDER, std::memory_order_relaxed);
+ if (!(lk & HOLDER))
+ {
+acquired:
+ DBUG_ASSERT(lk);
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return;
+ }
+ DBUG_ASSERT(lk > HOLDER);
}
+}
- for (;;)
+template<>
+void srw_mutex_impl<false>::wait_and_lock()
+{
+ uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
+ for (;; wait(lk))
{
- lk= lock.load(std::memory_order_relaxed);
- while (!(lk & HOLDER))
+ if (lk & HOLDER)
+ {
+ lk= lock.load(std::memory_order_relaxed);
+ if (lk & HOLDER)
+ continue;
+ }
+ lk= lock.fetch_or(HOLDER, std::memory_order_relaxed);
+ if (!(lk & HOLDER))
{
DBUG_ASSERT(lk);
- if (lock.compare_exchange_weak(lk, HOLDER | (lk - 1),
- std::memory_order_acquire,
- std::memory_order_relaxed))
- return;
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return;
}
DBUG_ASSERT(lk > HOLDER);
- wait(lk);
}
}
-void ssux_lock_low::wr_wait(uint32_t lk)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_wait(uint32_t lk)
{
DBUG_ASSERT(writer.is_locked());
DBUG_ASSERT(lk);
@@ -278,7 +356,11 @@ void ssux_lock_low::wr_wait(uint32_t lk)
while (lk != WRITER);
}
-void ssux_lock_low::rd_wait()
+template void ssux_lock_impl<true>::wr_wait(uint32_t);
+template void ssux_lock_impl<false>::wr_wait(uint32_t);
+
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_wait()
{
for (;;)
{
@@ -297,10 +379,22 @@ void ssux_lock_low::rd_wait()
}
writer.wr_unlock();
}
+
+template void ssux_lock_impl<true>::rd_wait();
+template void ssux_lock_impl<false>::rd_wait();
#endif /* SUX_LOCK_GENERIC */
#ifdef UNIV_PFS_RWLOCK
-void srw_lock::psi_rd_lock(const char *file, unsigned line)
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# define void_srw_lock void srw_lock_impl
+# else
+# define void_srw_lock template<bool spinloop> void srw_lock_impl<spinloop>
+template void srw_lock_impl<false>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<false>::psi_wr_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_wr_lock(const char*, unsigned);
+# endif
+void_srw_lock::psi_rd_lock(const char *file, unsigned line)
{
PSI_rwlock_locker_state state;
const bool nowait= lock.rd_lock_try();
@@ -316,7 +410,7 @@ void srw_lock::psi_rd_lock(const char *file, unsigned line)
lock.rd_lock();
}
-void srw_lock::psi_wr_lock(const char *file, unsigned line)
+void_srw_lock::psi_wr_lock(const char *file, unsigned line)
{
PSI_rwlock_locker_state state;
const bool nowait= lock.wr_lock_try();
@@ -396,7 +490,7 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
DBUG_ASSERT(lock.writer.is_locked());
uint32_t lk= 1;
const bool nowait=
- lock.readers.compare_exchange_strong(lk, ssux_lock_low::WRITER,
+ lock.readers.compare_exchange_strong(lk, ssux_lock_impl<false>::WRITER,
std::memory_order_acquire,
std::memory_order_relaxed);
if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
@@ -412,4 +506,14 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
else if (!nowait)
lock.u_wr_upgrade();
}
+#else /* UNIV_PFS_RWLOCK */
+template void ssux_lock_impl<false>::rd_lock();
+# ifdef SUX_LOCK_GENERIC
+template void ssux_lock_impl<false>::write_lock(bool);
+template void ssux_lock_impl<false>::update_lock(uint32_t);
+# else
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
+# endif
#endif /* UNIV_PFS_RWLOCK */
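Throughout srw_lock.cc above, the busy-wait paths are now gated on the spinloop template parameter and use srw_pause(), a run of CPU pause hints calibrated by srw_pause_delay(), before blocking on the futex-style wait; the spinloop=false variants block immediately. A compact sketch of that compile-time spin/no-spin split (the names, round counts and the yield fallback are assumptions, not the InnoDB code):

#include <atomic>
#include <thread>
#if defined(__x86_64__) || defined(_M_X64)
# include <immintrin.h>
# define RELAX_CPU() _mm_pause()        /* stand-in for MY_RELAX_CPU() */
#else
# define RELAX_CPU() ((void) 0)
#endif

/* srw_pause()-like helper: a calibrated run of pause hints, no memory access */
static inline void pause_cpu(unsigned delay)
{
  while (delay--)
    RELAX_CPU();
}

template<bool spinloop>
void wait_for(std::atomic<bool> &ready,
              unsigned spin_rounds= 30, unsigned delay= 4)
{
  if (spinloop)                         /* compile-time policy, as in the patch */
    for (unsigned spin= spin_rounds; spin--; )
    {
      pause_cpu(delay);
      if (ready.load(std::memory_order_acquire))
        return;
    }
  while (!ready.load(std::memory_order_acquire))
    std::this_thread::yield();          /* the real code blocks on a futex here */
}

/* explicit instantiations, mirroring the "template void ..." lines above */
template void wait_for<true>(std::atomic<bool>&, unsigned, unsigned);
template void wait_for<false>(std::atomic<bool>&, unsigned, unsigned);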
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index bb0b9882419..ad49d3e9c8e 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1275,7 +1275,7 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr)
/* Wait for any implicit-to-explicit lock conversions to cease,
so that there will be no race condition in lock_release(). */
while (UNIV_UNLIKELY(is_referenced()))
- ut_delay(srv_spin_wait_delay);
+ LF_BACKOFF();
}
else
ut_ad(read_only || !rsegs.m_redo.rseg);
diff --git a/storage/innobase/unittest/innodb_sync-t.cc b/storage/innobase/unittest/innodb_sync-t.cc
index 6cf0e648d9b..d0289086b24 100644
--- a/storage/innobase/unittest/innodb_sync-t.cc
+++ b/storage/innobase/unittest/innodb_sync-t.cc
@@ -62,7 +62,7 @@ static void test_srw_lock()
}
}
-static ssux_lock_low ssux;
+static ssux_lock_impl<false> ssux;
static void test_ssux_lock()
{
@@ -95,7 +95,7 @@ static void test_ssux_lock()
}
}
-static sux_lock<ssux_lock_low> sux;
+static sux_lock<ssux_lock_impl<true>> sux;
static void test_sux_lock()
{
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index 9c18667614b..8f4e2b2fdad 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -19203,6 +19203,9 @@ static void test_bug11766854()
struct st_mysql_client_plugin *plugin;
DBUG_ENTER("test_bug11766854");
+ if (!getenv("QA_AUTH_CLIENT_SO"))
+ DBUG_VOID_RETURN;
+
myheader("test_bug11766854");
plugin= mysql_load_plugin(mysql, "foo", -1, 0);
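The mysql_client_test.c change above makes the plugin test skip itself when its prerequisite is absent instead of failing. The same early-return guard, as a small sketch (the test body is omitted; only the environment variable name is taken from the hunk):

#include <cstdlib>
#include <cstdio>

static void test_plugin_guard()
{
  if (!std::getenv("QA_AUTH_CLIENT_SO"))   /* prerequisite missing: skip */
  {
    std::puts("skipped: QA_AUTH_CLIENT_SO is not set");
    return;
  }
  /* ... the real test would load and unload the client plugin here ... */
}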
diff --git a/win/appveyor_skip_tests.txt b/win/appveyor_skip_tests.txt
new file mode 100644
index 00000000000..3f0a0874064
--- /dev/null
+++ b/win/appveyor_skip_tests.txt
@@ -0,0 +1,14 @@
+main.mysql_upgrade : Takes long time on Appveyor
+main.mysqlslap : Takes long time
+mysql.upgrade_view : Takes long time
+main.check : Takes long time on Appveyor
+main.mrr_icp_extra : Takes long time on Appveyor
+main.derived_opt : Takes long time on Appveyor
+main.trigger : Takes long time on Appveyor
+main.index_merge_myisam : Takes long time on Appveyor
+main.mysqldump : Takes long time on Appveyor
+main.derived : Takes long time on Appveyor
+main.multi_update : Takes long time on Appveyor
+main.index_merge_innodb : Takes long time on Appveyor
+main.count_distinct2 : Takes long time on Appveyor
+main.mysqltest : Takes long time on Appveyor