author | unknown <monty@donna.mysql.com> | 2001-02-17 14:19:19 +0200
committer | unknown <monty@donna.mysql.com> | 2001-02-17 14:19:19 +0200
commit | 2662b59306ef0cd495fa6e2edf7129e58a11393a (patch)
tree | bfe39951a73e906579ab819bf5198ad8f3a64a36 /innobase/sync
parent | 66de55a56bdcf2f7a9c0c4f8e19b3e761475e202 (diff)
download | mariadb-git-2662b59306ef0cd495fa6e2edf7129e58a11393a.tar.gz
Added Innobase to source distribution
Docs/manual.texi:
Added Innobase documentation
configure.in:
Incremented version
include/my_base.h:
Added option for Innobase
myisam/mi_check.c:
cleanup
mysql-test/t/bdb.test:
cleanup
mysql-test/t/innobase.test:
Extended with new tests from bdb.test
mysql-test/t/merge.test:
Added test of SHOW create
mysys/my_init.c:
Fix for UNIXWARE 7
scripts/mysql_install_db.sh:
Always write how to start mysqld
scripts/safe_mysqld.sh:
Fixed typo
sql/ha_innobase.cc:
Update to new version
sql/ha_innobase.h:
Update to new version
sql/handler.h:
Added 'update_table_comment()' and 'append_create_info()' (see the illustrative sketch after this file list)
sql/sql_delete.cc:
Fixes for Innobase
sql/sql_select.cc:
Fixes for Innobase
sql/sql_show.cc:
Append create information (for MERGE tables)
sql/sql_update.cc:
Fixes for Innobase
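The handler.h entry above adds two per-engine hooks, 'update_table_comment()' and 'append_create_info()', and the sql_show.cc entry notes that SHOW CREATE TABLE uses the latter to append engine-specific text (for MERGE tables, their UNION list). The sketch below is only an illustration of that hook pattern, not the actual MySQL handler API: the simplified handler base class, the hypothetical ha_merge_like subclass, and the table names are assumptions made for demonstration.

// Illustrative sketch only -- NOT the real sql/handler.h declarations.
// SHOW CREATE TABLE composes the generic CREATE statement and then lets the
// storage engine append its own clause through append_create_info().
#include <cstdio>
#include <string>

struct handler {                                    // simplified stand-in for the real class
    virtual void update_table_comment(std::string &comment) { (void)comment; }
    virtual void append_create_info(std::string &packet) { (void)packet; }
    virtual ~handler() {}
};

// Hypothetical MERGE-style engine: its only engine-specific DDL text is the UNION list.
struct ha_merge_like : handler {
    void append_create_info(std::string &packet) override {
        packet += " UNION=(t1,t2)";
    }
};

int main() {
    std::string create_stmt = "CREATE TABLE m1 (a INT) TYPE=MRG_MyISAM";
    ha_merge_like h;
    h.append_create_info(create_stmt);              // called while building the SHOW CREATE output
    std::printf("%s\n", create_stmt.c_str());       // ... TYPE=MRG_MyISAM UNION=(t1,t2)
    return 0;
}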
Diffstat (limited to 'innobase/sync')
-rw-r--r-- | innobase/sync/Makefile.am | 24
-rw-r--r-- | innobase/sync/makefilewin | 17
-rw-r--r-- | innobase/sync/sync0arr.c | 804
-rw-r--r-- | innobase/sync/sync0ipm.c | 170
-rw-r--r-- | innobase/sync/sync0rw.c | 906
-rw-r--r-- | innobase/sync/sync0sync.c | 1179
-rw-r--r-- | innobase/sync/ts/makefile | 14
-rw-r--r-- | innobase/sync/ts/tssync.c | 1366
8 files changed, 4480 insertions, 0 deletions
diff --git a/innobase/sync/Makefile.am b/innobase/sync/Makefile.am new file mode 100644 index 00000000000..7504525bf84 --- /dev/null +++ b/innobase/sync/Makefile.am @@ -0,0 +1,24 @@ +# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +# & Innobase Oy +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +include ../include/Makefile.i + +libs_LIBRARIES = libsync.a + +libsync_a_SOURCES = sync0arr.c sync0ipm.c sync0rw.c sync0sync.c + +EXTRA_PROGRAMS = diff --git a/innobase/sync/makefilewin b/innobase/sync/makefilewin new file mode 100644 index 00000000000..5809d8e7375 --- /dev/null +++ b/innobase/sync/makefilewin @@ -0,0 +1,17 @@ +include ..\include\makefile.i + +sync.lib: sync0sync.obj sync0rw.obj sync0ipm.obj sync0arr.obj + lib -out:..\libs\sync.lib sync0sync.obj sync0rw.obj sync0ipm.obj sync0arr.obj + +sync0sync.obj: sync0sync.c + $(CCOM) $(CFLN) -c sync0sync.c + +sync0rw.obj: sync0rw.c + $(CCOM) $(CFL) -c sync0rw.c + +sync0ipm.obj: sync0ipm.c + $(CCOM) $(CFL) -c sync0ipm.c + +sync0arr.obj: sync0arr.c + $(CCOM) $(CFL) -c sync0arr.c + diff --git a/innobase/sync/sync0arr.c b/innobase/sync/sync0arr.c new file mode 100644 index 00000000000..193a60e36b6 --- /dev/null +++ b/innobase/sync/sync0arr.c @@ -0,0 +1,804 @@ +/****************************************************** +The wait array used in synchronization primitives + +(c) 1995 Innobase Oy + +Created 9/5/1995 Heikki Tuuri +*******************************************************/ + +#include "sync0arr.h" +#ifdef UNIV_NONINL +#include "sync0arr.ic" +#endif + +#include "sync0sync.h" +#include "sync0rw.h" +#include "os0sync.h" + +/* + WAIT ARRAY + ========== + +The wait array consists of cells each of which has an +an operating system event object created for it. The threads +waiting for a mutex, for example, can reserve a cell +in the array and suspend themselves to wait for the event +to become signaled. When using the wait array, remember to make +sure that some thread holding the synchronization object +will eventually know that there is a waiter in the array and +signal the object, to prevent infinite wait. +Why we chose to implement a wait array? First, to make +mutexes fast, we had to code our own implementation of them, +which only in usually uncommon cases resorts to using +slow operating system primitives. Then we had the choice of +assigning a unique OS event for each mutex, which would +be simpler, or using a global wait array. In some operating systems, +the global wait array solution is more efficient and flexible, +because we can do with a very small number of OS events, +say 200. In NT 3.51, allocating events seems to be a quadratic +algorithm, because 10 000 events are created fast, but +100 000 events takes a couple of minutes to create. +*/ + +/* A cell where an individual thread may wait suspended +until a resource is released. 
The suspending is implemented +using an operating system event semaphore. */ +struct sync_cell_struct { + void* wait_object; /* pointer to the object the + thread is waiting for; if NULL + the cell is free for use */ + ulint request_type; /* lock type requested on the + object */ + char* file; /* in debug version file where + requested */ + ulint line; /* in debug version line where + requested */ + os_thread_id_t thread; /* thread id of this waiting + thread */ + ibool waiting; /* TRUE if the thread has already + called sync_array_event_wait + on this cell but not yet + sync_array_free_cell (which + actually resets wait_object and thus + whole cell) */ + ibool event_set; /* TRUE if the event is set */ + os_event_t event; /* operating system event + semaphore handle */ +}; + +/* NOTE: It is allowed for a thread to wait +for an event allocated for the array without owning the +protecting mutex (depending on the case: OS or database mutex), but +all changes (set or reset) to the state of the event must be made +while owning the mutex. */ +struct sync_array_struct { + ulint n_reserved; /* number of currently reserved + cells in the wait array */ + ulint n_cells; /* number of cells in the + wait array */ + sync_cell_t* array; /* pointer to wait array */ + ulint protection; /* this flag tells which + mutex protects the data */ + mutex_t mutex; /* possible database mutex + protecting this data structure */ + os_mutex_t os_mutex; /* Possible operating system mutex + protecting the data structure. + As this data structure is used in + constructing the database mutex, + to prevent infinite recursion + in implementation, we fall back to + an OS mutex. */ + ulint sg_count; /* count of how many times an + object has been signalled */ + ulint res_count; /* count of cell reservations + since creation of the array */ +}; + +/********************************************************************** +This function is called only in the debug version. Detects a deadlock +of one or more threads because of waits of semaphores. */ +static +ibool +sync_array_detect_deadlock( +/*=======================*/ + /* out: TRUE if deadlock detected */ + sync_array_t* arr, /* in: wait array; NOTE! the caller must + own the mutex to array */ + sync_cell_t* start, /* in: cell where recursive search started */ + sync_cell_t* cell, /* in: cell to search */ + ulint depth); /* in: recursion depth */ + +/********************************************************************* +Gets the nth cell in array. */ +static +sync_cell_t* +sync_array_get_nth_cell( +/*====================*/ + /* out: cell */ + sync_array_t* arr, /* in: sync array */ + ulint n) /* in: index */ +{ + ut_a(arr); + ut_a(n >= 0); + ut_a(n < arr->n_cells); + + return(arr->array + n); +} + +/********************************************************************** +Reserves the mutex semaphore protecting a sync array. */ +static +void +sync_array_enter( +/*=============*/ + sync_array_t* arr) /* in: sync wait array */ +{ + ulint protection; + + protection = arr->protection; + + if (protection == SYNC_ARRAY_OS_MUTEX) { + os_mutex_enter(arr->os_mutex); + } else if (protection == SYNC_ARRAY_MUTEX) { + mutex_enter(&(arr->mutex)); + } else { + ut_error; + } +} + +/********************************************************************** +Releases the mutex semaphore protecting a sync array. 
*/ +static +void +sync_array_exit( +/*============*/ + sync_array_t* arr) /* in: sync wait array */ +{ + ulint protection; + + protection = arr->protection; + + if (protection == SYNC_ARRAY_OS_MUTEX) { + os_mutex_exit(arr->os_mutex); + } else if (protection == SYNC_ARRAY_MUTEX) { + mutex_exit(&(arr->mutex)); + } else { + ut_error; + } +} + +/*********************************************************************** +Creates a synchronization wait array. It is protected by a mutex +which is automatically reserved when the functions operating on it +are called. */ + +sync_array_t* +sync_array_create( +/*==============*/ + /* out, own: created wait array */ + ulint n_cells, /* in: number of cells in the array + to create */ + ulint protection) /* in: either SYNC_ARRAY_OS_MUTEX or + SYNC_ARRAY_MUTEX: determines the type + of mutex protecting the data structure */ +{ + sync_array_t* arr; + sync_cell_t* cell_array; + sync_cell_t* cell; + ulint i; + + ut_a(n_cells > 0); + + /* Allocate memory for the data structures */ + arr = ut_malloc(sizeof(sync_array_t)); + + cell_array = ut_malloc(sizeof(sync_cell_t) * n_cells); + + arr->n_cells = n_cells; + arr->n_reserved = 0; + arr->array = cell_array; + arr->protection = protection; + arr->sg_count = 0; + arr->res_count = 0; + + /* Then create the mutex to protect the wait array complex */ + if (protection == SYNC_ARRAY_OS_MUTEX) { + arr->os_mutex = os_mutex_create(NULL); + } else if (protection == SYNC_ARRAY_MUTEX) { + mutex_create(&(arr->mutex)); + mutex_set_level(&(arr->mutex), SYNC_NO_ORDER_CHECK); + } else { + ut_error; + } + + for (i = 0; i < n_cells; i++) { + cell = sync_array_get_nth_cell(arr, i); + cell->wait_object = NULL; + + /* Create an operating system event semaphore with no name */ + cell->event = os_event_create(NULL); + cell->event_set = FALSE; /* it is created in reset state */ + } + + return(arr); +} + +/********************************************************************** +Frees the resources in a wait array. */ + +void +sync_array_free( +/*============*/ + sync_array_t* arr) /* in, own: sync wait array */ +{ + ulint i; + sync_cell_t* cell; + ulint protection; + + ut_a(arr->n_reserved == 0); + + sync_array_validate(arr); + + for (i = 0; i < arr->n_cells; i++) { + cell = sync_array_get_nth_cell(arr, i); + os_event_free(cell->event); + } + + protection = arr->protection; + + /* Release the mutex protecting the wait array complex */ + + if (protection == SYNC_ARRAY_OS_MUTEX) { + os_mutex_free(arr->os_mutex); + } else if (protection == SYNC_ARRAY_MUTEX) { + mutex_free(&(arr->mutex)); + } else { + ut_error; + } + + ut_free(arr->array); + ut_free(arr); +} + +/************************************************************************ +Validates the integrity of the wait array. Checks +that the number of reserved cells equals the count variable. */ + +void +sync_array_validate( +/*================*/ + sync_array_t* arr) /* in: sync wait array */ +{ + ulint i; + sync_cell_t* cell; + ulint count = 0; + + sync_array_enter(arr); + + for (i = 0; i < arr->n_cells; i++) { + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL) { + count++; + } + } + + ut_a(count == arr->n_reserved); + + sync_array_exit(arr); +} + +/*********************************************************************** +Puts the cell event in set state. 
*/ +static +void +sync_cell_event_set( +/*================*/ + sync_cell_t* cell) /* in: array cell */ +{ + os_event_set(cell->event); + cell->event_set = TRUE; +} + +/*********************************************************************** +Puts the cell event in reset state. */ +static +void +sync_cell_event_reset( +/*==================*/ + sync_cell_t* cell) /* in: array cell */ +{ + os_event_reset(cell->event); + cell->event_set = FALSE; +} + +/********************************************************************** +Reserves a wait array cell for waiting for an object. +The event of the cell is reset to nonsignalled state. */ + +void +sync_array_reserve_cell( +/*====================*/ + sync_array_t* arr, /* in: wait array */ + void* object, /* in: pointer to the object to wait for */ + ulint type, /* in: lock request type */ + #ifdef UNIV_SYNC_DEBUG + char* file, /* in: in debug version file where + requested */ + ulint line, /* in: in the debug version line where + requested */ + #endif + ulint* index) /* out: index of the reserved cell */ +{ + ulint i; + sync_cell_t* cell; + + ut_a(object); + ut_a(index); + + sync_array_enter(arr); + + arr->res_count++; + + /* Reserve a new cell. */ + for (i = 0; i < arr->n_cells; i++) { + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object == NULL) { + + /* Make sure the event is reset */ + if (cell->event_set) { + sync_cell_event_reset(cell); + } + + cell->wait_object = object; + cell->request_type = type; + cell->thread = os_thread_get_curr_id(); + cell->waiting = FALSE; + + #ifdef UNIV_SYNC_DEBUG + cell->file = file; + cell->line = line; + #else + cell->file = "NOT KNOWN"; + cell->line = 0; + #endif + + arr->n_reserved++; + + *index = i; + + sync_array_exit(arr); + + return; + } + } + + ut_error; /* No free cell found */ + + return; +} + +/********************************************************************** +This function should be called when a thread starts to wait on +a wait array cell. In the debug version this function checks +if the wait for a semaphore will result in a deadlock, in which +case prints info and asserts. */ + +void +sync_array_wait_event( +/*==================*/ + sync_array_t* arr, /* in: wait array */ + ulint index) /* in: index of the reserved cell */ +{ + sync_cell_t* cell; + os_event_t event; + + ut_a(arr); + + sync_array_enter(arr); + + cell = sync_array_get_nth_cell(arr, index); + + ut_a(cell->wait_object); + ut_a(!cell->waiting); + ut_ad(os_thread_get_curr_id() == cell->thread); + + event = cell->event; + cell->waiting = TRUE; + +#ifdef UNIV_SYNC_DEBUG + + /* We use simple enter to the mutex below, because if + we cannot acquire it at once, mutex_enter would call + recursively sync_array routines, leading to trouble. + rw_lock_debug_mutex freezes the debug lists. */ + + rw_lock_debug_mutex_enter(); + + if (TRUE == sync_array_detect_deadlock(arr, cell, cell, 0)) { + + printf("########################################\n"); + ut_error; + } + + rw_lock_debug_mutex_exit(); +#endif + sync_array_exit(arr); + + os_event_wait(event); + + sync_array_free_cell(arr, index); +} + +/********************************************************************** +Reports info of a wait array cell. 
*/ +static +void +sync_array_cell_print( +/*==================*/ + sync_cell_t* cell) /* in: sync cell */ +{ + char* str = NULL; + ulint type; + + type = cell->request_type; + + if (type == SYNC_MUTEX) { + str = "MUTEX ENTER"; + } else if (type == RW_LOCK_EX) { + str = "X-LOCK"; + } else if (type == RW_LOCK_SHARED) { + str = "S-LOCK"; + } else { + ut_error; + } + + printf("%lx waited for by thread %lu op. %s file %s line %lu ", + cell->wait_object, cell->thread, + str, cell->file,cell->line); + if (!cell->waiting) { + printf("WAIT ENDED "); + } + + if (cell->event_set) { + printf("EVENT SET"); + } + + printf("\n"); +} + +/********************************************************************** +Looks for a cell with the given thread id. */ +static +sync_cell_t* +sync_array_find_thread( +/*===================*/ + /* out: pointer to cell or NULL + if not found */ + sync_array_t* arr, /* in: wait array */ + os_thread_id_t thread) /* in: thread id */ +{ + ulint i; + sync_cell_t* cell; + + for (i = 0; i < arr->n_cells; i++) { + + cell = sync_array_get_nth_cell(arr, i); + + if ((cell->wait_object != NULL) + && (cell->thread == thread)) { + + return(cell); /* Found */ + } + } + + return(NULL); /* Not found */ +} + +/********************************************************************** +Recursion step for deadlock detection. */ +static +ibool +sync_array_deadlock_step( +/*=====================*/ + /* out: TRUE if deadlock detected */ + sync_array_t* arr, /* in: wait array; NOTE! the caller must + own the mutex to array */ + sync_cell_t* start, /* in: cell where recursive search + started */ + os_thread_id_t thread, /* in: thread to look at */ + ulint pass, /* in: pass value */ + ulint depth) /* in: recursion depth */ +{ + sync_cell_t* new; + ibool ret; + + depth++; + + if (pass != 0) { + /* If pass != 0, then we do not know which threads are + responsible of releasing the lock, and no deadlock can + be detected. */ + + return(FALSE); + } + + new = sync_array_find_thread(arr, thread); + + if (new == start) { + /* Stop running of other threads */ + + ut_dbg_stop_threads = TRUE; + + /* Deadlock */ + printf("########################################\n"); + printf("DEADLOCK of threads detected!\n"); + + return(TRUE); + + } else if (new) { + ret = sync_array_detect_deadlock(arr, start, new, depth); + + if (ret) { + return(TRUE); + } + } + return(FALSE); +} + +/********************************************************************** +This function is called only in the debug version. Detects a deadlock +of one or more threads because of waits of semaphores. */ +static +ibool +sync_array_detect_deadlock( +/*=======================*/ + /* out: TRUE if deadlock detected */ + sync_array_t* arr, /* in: wait array; NOTE! 
the caller must + own the mutex to array */ + sync_cell_t* start, /* in: cell where recursive search started */ + sync_cell_t* cell, /* in: cell to search */ + ulint depth) /* in: recursion depth */ +{ + mutex_t* mutex; + rw_lock_t* lock; + os_thread_id_t thread; + ibool ret; + rw_lock_debug_t* debug; + + ut_a(arr && start && cell); + ut_ad(cell->wait_object); + ut_ad(os_thread_get_curr_id() == start->thread); + ut_ad(depth < 100); + + depth++; + + if (cell->event_set || !cell->waiting) { + + return(FALSE); /* No deadlock here */ + } + + if (cell->request_type == SYNC_MUTEX) { + + mutex = cell->wait_object; + + if (mutex_get_lock_word(mutex) != 0) { + + thread = mutex->thread_id; + + /* Note that mutex->thread_id above may be + also OS_THREAD_ID_UNDEFINED, because the + thread which held the mutex maybe has not + yet updated the value, or it has already + released the mutex: in this case no deadlock + can occur, as the wait array cannot contain + a thread with ID_UNDEFINED value. */ + ret = sync_array_deadlock_step(arr, start, thread, 0, + depth); + if (ret) { + printf( + "Mutex %lx owned by thread %lu file %s line %lu\n", + (ulint)mutex, mutex->thread_id, + mutex->file_name, mutex->line); + sync_array_cell_print(cell); + return(TRUE); + } + } + + return(FALSE); /* No deadlock */ + + } else if (cell->request_type == RW_LOCK_EX) { + + lock = cell->wait_object; + + debug = UT_LIST_GET_FIRST(lock->debug_list); + + while (debug != NULL) { + + thread = debug->thread_id; + + if (((debug->lock_type == RW_LOCK_EX) + && (thread != cell->thread)) + || ((debug->lock_type == RW_LOCK_WAIT_EX) + && (thread != cell->thread)) + || (debug->lock_type == RW_LOCK_SHARED)) { + + /* The (wait) x-lock request can block infinitely + only if someone (can be also cell thread) is holding + s-lock, or someone (cannot be cell thread) (wait) + x-lock, and he is blocked by start thread */ + + ret = sync_array_deadlock_step(arr, start, thread, + debug->pass, + depth); + if (ret) { + printf("rw-lock %lx ", lock); + rw_lock_debug_print(debug); + sync_array_cell_print(cell); + + return(TRUE); + } + } + + debug = UT_LIST_GET_NEXT(list, debug); + } + + return(FALSE); + + } else if (cell->request_type == RW_LOCK_SHARED) { + + lock = cell->wait_object; + debug = UT_LIST_GET_FIRST(lock->debug_list); + + while (debug != NULL) { + + thread = debug->thread_id; + + if ((debug->lock_type == RW_LOCK_EX) + || (debug->lock_type == RW_LOCK_WAIT_EX)) { + + /* The s-lock request can block infinitely only if + someone (can also be cell thread) is holding (wait) + x-lock, and he is blocked by start thread */ + + ret = sync_array_deadlock_step(arr, start, thread, + debug->pass, + depth); + if (ret) { + printf("rw-lock %lx ", lock); + rw_lock_debug_print(debug); + sync_array_cell_print(cell); + + return(TRUE); + } + } + + debug = UT_LIST_GET_NEXT(list, debug); + } + + return(FALSE); + + } else { + ut_error; + } + + return(TRUE); /* Execution never reaches this line: for compiler + fooling only */ +} + +/********************************************************************** +Frees the cell. NOTE! sync_array_wait_event frees the cell +automatically! 
*/ + +void +sync_array_free_cell( +/*=================*/ + sync_array_t* arr, /* in: wait array */ + ulint index) /* in: index of the cell in array */ +{ + sync_cell_t* cell; + + sync_array_enter(arr); + + cell = sync_array_get_nth_cell(arr, index); + + ut_a(cell->wait_object != NULL); + + cell->wait_object = NULL; + + ut_a(arr->n_reserved > 0); + arr->n_reserved--; + + sync_array_exit(arr); +} + +/************************************************************************** +Looks for the cells in the wait array which refer +to the wait object specified, +and sets their corresponding events to the signaled state. In this +way releases the threads waiting for the object to contend for the object. +It is possible that no such cell is found, in which case does nothing. */ + +void +sync_array_signal_object( +/*=====================*/ + sync_array_t* arr, /* in: wait array */ + void* object) /* in: wait object */ +{ + sync_cell_t* cell; + ulint count; + ulint i; + + sync_array_enter(arr); + + arr->sg_count++; + + i = 0; + count = 0; + + while (count < arr->n_reserved) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL) { + + count++; + if (cell->wait_object == object) { + + sync_cell_event_set(cell); + } + } + + i++; + } + + sync_array_exit(arr); +} + +/************************************************************************** +Prints info of the wait array. */ +static +void +sync_array_output_info( +/*===================*/ + sync_array_t* arr) /* in: wait array; NOTE! caller must own the + mutex */ +{ + sync_cell_t* cell; + ulint count; + ulint i; + + printf("-----------------------------------------------------\n"); + printf("SYNC ARRAY INFO: reservation count %ld, signal count %ld\n", + arr->res_count, arr->sg_count); + i = 0; + count = 0; + + while (count < arr->n_reserved) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL) { + count++; + sync_array_cell_print(cell); + } + + i++; + } +} + +/************************************************************************** +Prints info of the wait array. */ + +void +sync_array_print_info( +/*==================*/ + sync_array_t* arr) /* in: wait array */ +{ + sync_array_enter(arr); + + sync_array_output_info(arr); + + sync_array_exit(arr); +} diff --git a/innobase/sync/sync0ipm.c b/innobase/sync/sync0ipm.c new file mode 100644 index 00000000000..e10e1c85da5 --- /dev/null +++ b/innobase/sync/sync0ipm.c @@ -0,0 +1,170 @@ +/****************************************************** +A fast mutex for interprocess synchronization. +mutex_t can be used only within single process, +but ip_mutex_t also between processes. + +(c) 1995 Innobase Oy + +Created 9/30/1995 Heikki Tuuri +*******************************************************/ +#include "sync0ipm.h" +#ifdef UNIV_NONINL +#include "sync0ipm.ic" +#endif + +#include "mem0mem.h" + +/* The performance of the ip mutex in NT depends on how often +a thread has to suspend itself waiting for the ip mutex +to become free. The following variable counts system calls +involved. */ + +ulint ip_mutex_system_call_count = 0; + +/********************************************************************** +Creates, or rather, initializes +an ip mutex object in a specified shared memory location (which must be +appropriately aligned). The ip mutex is initialized in the reset state. +NOTE! Explicit destroying of the ip mutex with ip_mutex_free +is not recommended +as the mutex resides in shared memory and we cannot make sure that +no process is currently accessing it. 
Therefore just use +ip_mutex_close to free the operating system event and mutex. */ + +ulint +ip_mutex_create( +/*============*/ + /* out: 0 if succeed */ + ip_mutex_t* ip_mutex, /* in: pointer to shared memory */ + char* name, /* in: name of the ip mutex */ + ip_mutex_hdl_t** handle) /* out, own: handle to the + created mutex; handle exists + in the private address space of + the calling process */ +{ + mutex_t* mutex; + char* buf; + os_event_t released; + os_mutex_t exclude; + + ip_mutex_set_waiters(ip_mutex, 0); + + buf = mem_alloc(strlen(name) + 20); + + strcpy(buf, name); + strcpy(buf + strlen(name), "_IB_RELS"); + + released = os_event_create(buf); + + if (released == NULL) { + mem_free(buf); + return(1); + } + + strcpy(buf + strlen(name), "_IB_EXCL"); + + exclude = os_mutex_create(buf); + + if (exclude == NULL) { + os_event_free(released); + mem_free(buf); + return(1); + } + + mutex = ip_mutex_get_mutex(ip_mutex); + + mutex_create(mutex); + mutex_set_level(mutex, SYNC_NO_ORDER_CHECK); + + *handle = mem_alloc(sizeof(ip_mutex_hdl_t)); + + (*handle)->ip_mutex = ip_mutex; + (*handle)->released = released; + (*handle)->exclude = exclude; + + mem_free(buf); + + return(0); +} + +/********************************************************************** +NOTE! Using this function is not recommended. See the note +on ip_mutex_create. Destroys an ip mutex */ + +void +ip_mutex_free( +/*==========*/ + ip_mutex_hdl_t* handle) /* in, own: ip mutex handle */ +{ + mutex_free(ip_mutex_get_mutex(handle->ip_mutex)); + + os_event_free(handle->released); + os_mutex_free(handle->exclude); + + mem_free(handle); +} + +/********************************************************************** +Opens an ip mutex object in a specified shared memory location. +Explicit closing of the ip mutex with ip_mutex_close is necessary to +free the operating system event and mutex created, and the handle. */ + +ulint +ip_mutex_open( +/*==========*/ + /* out: 0 if succeed */ + ip_mutex_t* ip_mutex, /* in: pointer to shared memory */ + char* name, /* in: name of the ip mutex */ + ip_mutex_hdl_t** handle) /* out, own: handle to the + opened mutex */ +{ + char* buf; + os_event_t released; + os_mutex_t exclude; + + buf = mem_alloc(strlen(name) + 20); + + strcpy(buf, name); + strcpy(buf + strlen(name), "_IB_RELS"); + + released = os_event_create(buf); + + if (released == NULL) { + mem_free(buf); + return(1); + } + + strcpy(buf + strlen(name), "_IB_EXCL"); + + exclude = os_mutex_create(buf); + + if (exclude == NULL) { + os_event_free(released); + mem_free(buf); + return(1); + } + + *handle = mem_alloc(sizeof(ip_mutex_hdl_t)); + + (*handle)->ip_mutex = ip_mutex; + (*handle)->released = released; + (*handle)->exclude = exclude; + + mem_free(buf); + + return(0); +} + +/********************************************************************** +Closes an ip mutex. 
*/ + +void +ip_mutex_close( +/*===========*/ + ip_mutex_hdl_t* handle) /* in, own: ip mutex handle */ +{ + os_event_free(handle->released); + os_mutex_free(handle->exclude); + + mem_free(handle); +} diff --git a/innobase/sync/sync0rw.c b/innobase/sync/sync0rw.c new file mode 100644 index 00000000000..77589587065 --- /dev/null +++ b/innobase/sync/sync0rw.c @@ -0,0 +1,906 @@ +/****************************************************** +The read-write lock (for thread synchronization) + +(c) 1995 Innobase Oy + +Created 9/11/1995 Heikki Tuuri +*******************************************************/ + +#include "sync0rw.h" +#ifdef UNIV_NONINL +#include "sync0rw.ic" +#endif + +#include "os0thread.h" +#include "mem0mem.h" +#include "srv0srv.h" + +ulint rw_s_system_call_count = 0; +ulint rw_s_spin_wait_count = 0; + +ulint rw_s_exit_count = 0; + +ulint rw_x_system_call_count = 0; +ulint rw_x_spin_wait_count = 0; + +ulint rw_x_exit_count = 0; + +/* The global list of rw-locks */ +rw_lock_list_t rw_lock_list; +mutex_t rw_lock_list_mutex; + +/* The global mutex which protects debug info lists of all rw-locks. +To modify the debug info list of an rw-lock, this mutex has to be +acquired in addition to the mutex protecting the lock. */ + +mutex_t rw_lock_debug_mutex; +os_event_t rw_lock_debug_event; /* If deadlock detection does not + get immediately the mutex, it may + wait for this event */ +ibool rw_lock_debug_waiters; /* This is set to TRUE, if there may + be waiters for the event */ + +/********************************************************************** +Creates a debug info struct. */ +static +rw_lock_debug_t* +rw_lock_debug_create(void); +/*======================*/ +/********************************************************************** +Frees a debug info struct. */ +static +void +rw_lock_debug_free( +/*===============*/ + rw_lock_debug_t* info); + +/********************************************************************** +Creates a debug info struct. */ +static +rw_lock_debug_t* +rw_lock_debug_create(void) +/*======================*/ +{ + return((rw_lock_debug_t*) mem_alloc(sizeof(rw_lock_debug_t))); +} + +/********************************************************************** +Frees a debug info struct. */ +static +void +rw_lock_debug_free( +/*===============*/ + rw_lock_debug_t* info) +{ + mem_free(info); +} + +/********************************************************************** +Creates, or rather, initializes an rw-lock object in a specified memory +location (which must be appropriately aligned). The rw-lock is initialized +to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free +is necessary only if the memory block containing it is freed. */ + +void +rw_lock_create_func( +/*================*/ + rw_lock_t* lock, /* in: pointer to memory */ + char* cfile_name, /* in: file name where created */ + ulint cline) /* in: file line where created */ +{ + /* If this is the very first time a synchronization + object is created, then the following call initializes + the sync system. 
*/ + + mutex_create(rw_lock_get_mutex(lock)); + mutex_set_level(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK); + + ut_memcpy(&(lock->mutex.cfile_name), cfile_name, + ut_min(RW_CNAME_LEN - 1, ut_strlen(cfile_name))); + lock->mutex.cline = cline; + + rw_lock_set_waiters(lock, 0); + rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED); + lock->writer_count = 0; + rw_lock_set_reader_count(lock, 0); + + lock->writer_is_wait_ex = FALSE; + + UT_LIST_INIT(lock->debug_list); + + lock->magic_n = RW_LOCK_MAGIC_N; + lock->level = SYNC_LEVEL_NONE; + + ut_memcpy(&(lock->cfile_name), cfile_name, + ut_min(RW_CNAME_LEN - 1, ut_strlen(cfile_name))); + lock->cfile_name[RW_CNAME_LEN - 1] = '\0'; + lock->cline = cline; + + mutex_enter(&rw_lock_list_mutex); + + UT_LIST_ADD_FIRST(list, rw_lock_list, lock); + + mutex_exit(&rw_lock_list_mutex); +} + +/********************************************************************** +Calling this function is obligatory only if the memory buffer containing +the rw-lock is freed. Removes an rw-lock object from the global list. The +rw-lock is checked to be in the non-locked state. */ + +void +rw_lock_free( +/*=========*/ + rw_lock_t* lock) /* in: rw-lock */ +{ + ut_ad(rw_lock_validate(lock)); + ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED); + ut_a(rw_lock_get_waiters(lock) == 0); + ut_a(rw_lock_get_reader_count(lock) == 0); + + lock->magic_n = 0; + + mutex_free(rw_lock_get_mutex(lock)); + + mutex_enter(&rw_lock_list_mutex); + + UT_LIST_REMOVE(list, rw_lock_list, lock); + + mutex_exit(&rw_lock_list_mutex); +} + +/********************************************************************** +Checks that the rw-lock has been initialized and that there are no +simultaneous shared and exclusive locks. */ + +ibool +rw_lock_validate( +/*=============*/ + rw_lock_t* lock) +{ + ut_a(lock); + + mutex_enter(rw_lock_get_mutex(lock)); + + ut_a(lock->magic_n == RW_LOCK_MAGIC_N); + ut_a((rw_lock_get_reader_count(lock) == 0) + || (rw_lock_get_writer(lock) != RW_LOCK_EX)); + ut_a(rw_lock_get_reader_count(lock) >= 0); + ut_a((rw_lock_get_writer(lock) == RW_LOCK_EX) + || (rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX) + || (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED)); + ut_a((rw_lock_get_waiters(lock) == 0) + || (rw_lock_get_waiters(lock) == 1)); + ut_a((lock->writer != RW_LOCK_EX) || (lock->writer_count > 0)); + + mutex_exit(rw_lock_get_mutex(lock)); + + return(TRUE); +} + +/********************************************************************** +Lock an rw-lock in shared mode for the current thread. If the rw-lock is +locked in exclusive mode, or there is an exclusive lock request waiting, +the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting +for the lock, before suspending the thread. 
*/ + +void +rw_lock_s_lock_spin( +/*================*/ + rw_lock_t* lock /* in: pointer to rw-lock */ + #ifdef UNIV_SYNC_DEBUG + ,ulint pass, /* in: pass value; != 0, if the lock + will be passed to another thread to unlock */ + char* file_name, /* in: file name where lock requested */ + ulint line /* in: line where requested */ + #endif +) +{ + ulint index; /* index of the reserved wait cell */ + ulint i; /* spin round count */ + + ut_ad(rw_lock_validate(lock)); + +lock_loop: + rw_s_spin_wait_count++; + + /* Spin waiting for the writer field to become free */ + i = 0; + + while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED + && i < SYNC_SPIN_ROUNDS) { + if (srv_spin_wait_delay) { + ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); + } + + i++; + } + + if (i == SYNC_SPIN_ROUNDS) { + os_thread_yield(); + } + + if (srv_print_latch_waits) { + printf( + "Thread %lu spin wait rw-s-lock at %lx cfile %s cline %lu rnds %lu\n", + os_thread_get_curr_id(), (ulint)lock, + &(lock->cfile_name), lock->cline, i); + } + + mutex_enter(rw_lock_get_mutex(lock)); + + /* We try once again to obtain the lock */ + + if (TRUE == rw_lock_s_lock_low(lock + #ifdef UNIV_SYNC_DEBUG + , pass, file_name, + line + #endif + )) { + mutex_exit(rw_lock_get_mutex(lock)); + + return; /* Success */ + } else { + /* If we get here, locking did not succeed, we may + suspend the thread to wait in the wait array */ + + rw_s_system_call_count++; + + sync_array_reserve_cell(sync_primary_wait_array, + lock, RW_LOCK_SHARED, + #ifdef UNIV_SYNC_DEBUG + file_name, line, + #endif + &index); + + rw_lock_set_waiters(lock, 1); + + mutex_exit(rw_lock_get_mutex(lock)); + + if (srv_print_latch_waits) { + printf( + "Thread %lu OS wait rw-s-lock at %lx cfile %s cline %lu\n", + os_thread_get_curr_id(), (ulint)lock, + &(lock->cfile_name), lock->cline); + } + + rw_s_system_call_count++; + + sync_array_wait_event(sync_primary_wait_array, index); + + goto lock_loop; + } +} + +/********************************************************************** +This function is used in the insert buffer to move the ownership of an +x-latch on a buffer frame to the current thread. The x-latch was set by +the buffer read operation and it protected the buffer frame while the +read was done. The ownership is moved because we want that the current +thread is able to acquire a second x-latch which is stored in an mtr. +This, in turn, is needed to pass the debug checks of index page +operations. */ + +void +rw_lock_x_lock_move_ownership( +/*==========================*/ + rw_lock_t* lock) /* in: lock which was x-locked in the + buffer read */ +{ + ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX)); + + mutex_enter(&(lock->mutex)); + + lock->writer_thread = os_thread_get_curr_id(); + + lock->pass = 0; + + mutex_exit(&(lock->mutex)); +} + +/********************************************************************** +Low-level function for acquiring an exclusive lock. 
*/ +UNIV_INLINE +ulint +rw_lock_x_lock_low( +/*===============*/ + /* out: RW_LOCK_NOT_LOCKED if did + not succeed, RW_LOCK_EX if success, + RW_LOCK_WAIT_EX, if got wait reservation */ + rw_lock_t* lock, /* in: pointer to rw-lock */ + ulint pass /* in: pass value; != 0, if the lock will + be passed to another thread to unlock */ + #ifdef UNIV_SYNC_DEBUG + ,char* file_name, /* in: file name where lock requested */ + ulint line /* in: line where requested */ + #endif +) +{ + ut_ad(mutex_own(rw_lock_get_mutex(lock))); + + if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) { + + if (rw_lock_get_reader_count(lock) == 0) { + + rw_lock_set_writer(lock, RW_LOCK_EX); + lock->writer_thread = os_thread_get_curr_id(); + lock->writer_count++; + lock->pass = pass; + + #ifdef UNIV_SYNC_DEBUG + rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, + file_name, line); + #endif + + /* Locking succeeded, we may return */ + return(RW_LOCK_EX); + } else { + /* There are readers, we have to wait */ + rw_lock_set_writer(lock, RW_LOCK_WAIT_EX); + lock->writer_thread = os_thread_get_curr_id(); + lock->pass = pass; + lock->writer_is_wait_ex = TRUE; + + #ifdef UNIV_SYNC_DEBUG + rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX, + file_name, line); + #endif + + return(RW_LOCK_WAIT_EX); + } + + } else if ((rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX) + && (lock->writer_thread == os_thread_get_curr_id())) { + + if (rw_lock_get_reader_count(lock) == 0) { + + rw_lock_set_writer(lock, RW_LOCK_EX); + lock->writer_count++; + lock->pass = pass; + lock->writer_is_wait_ex = FALSE; + + #ifdef UNIV_SYNC_DEBUG + rw_lock_remove_debug_info(lock, pass, RW_LOCK_WAIT_EX); + rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, + file_name, line); + #endif + + /* Locking succeeded, we may return */ + return(RW_LOCK_EX); + } + + return(RW_LOCK_WAIT_EX); + + } else if ((rw_lock_get_writer(lock) == RW_LOCK_EX) + && (lock->writer_thread == os_thread_get_curr_id()) + && (lock->pass == 0) + && (pass == 0)) { + + lock->writer_count++; + + #ifdef UNIV_SYNC_DEBUG + rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name, + line); + #endif + + /* Locking succeeded, we may return */ + return(RW_LOCK_EX); + } + + /* Locking did not succeed */ + return(RW_LOCK_NOT_LOCKED); +} + +/********************************************************************** +NOTE! Use the corresponding macro, not directly this function! Lock an +rw-lock in exclusive mode for the current thread. If the rw-lock is locked +in shared or exclusive mode, or there is an exclusive lock request waiting, +the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting +for the lock before suspending the thread. If the same thread has an x-lock +on the rw-lock, locking succeed, with the following exception: if pass != 0, +only a single x-lock may be taken on the lock. NOTE: If the same thread has +an s-lock, locking does not succeed! 
*/ + +void +rw_lock_x_lock_func( +/*================*/ + rw_lock_t* lock, /* in: pointer to rw-lock */ + ulint pass /* in: pass value; != 0, if the lock will + be passed to another thread to unlock */ + #ifdef UNIV_SYNC_DEBUG + ,char* file_name, /* in: file name where lock requested */ + ulint line /* in: line where requested */ + #endif +) +{ + ulint index; /* index of the reserved wait cell */ + ulint state; /* lock state acquired */ + ulint i; /* spin round count */ + + ut_ad(rw_lock_validate(lock)); + +lock_loop: + /* Acquire the mutex protecting the rw-lock fields */ + mutex_enter_fast(&(lock->mutex)); + + state = rw_lock_x_lock_low(lock, pass + #ifdef UNIV_SYNC_DEBUG + ,file_name, line + #endif + ); + + mutex_exit(&(lock->mutex)); + + if (state == RW_LOCK_EX) { + + return; /* Locking succeeded */ + + } else if (state == RW_LOCK_NOT_LOCKED) { + + /* Spin waiting for the writer field to become free */ + i = 0; + + while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED + && i < SYNC_SPIN_ROUNDS) { + if (srv_spin_wait_delay) { + ut_delay(ut_rnd_interval(0, + srv_spin_wait_delay)); + } + + i++; + } + if (i == SYNC_SPIN_ROUNDS) { + os_thread_yield(); + } + } else if (state == RW_LOCK_WAIT_EX) { + + /* Spin waiting for the reader count field to become zero */ + i = 0; + + while (rw_lock_get_reader_count(lock) != 0 + && i < SYNC_SPIN_ROUNDS) { + if (srv_spin_wait_delay) { + ut_delay(ut_rnd_interval(0, + srv_spin_wait_delay)); + } + + i++; + } + if (i == SYNC_SPIN_ROUNDS) { + os_thread_yield(); + } + } else { + ut_error; + } + + if (srv_print_latch_waits) { + printf( + "Thread %lu spin wait rw-x-lock at %lx cfile %s cline %lu rnds %lu\n", + os_thread_get_curr_id(), (ulint)lock, + &(lock->cfile_name), lock->cline, i); + } + + rw_x_spin_wait_count++; + + /* We try once again to obtain the lock. Acquire the mutex protecting + the rw-lock fields */ + + mutex_enter(rw_lock_get_mutex(lock)); + + state = rw_lock_x_lock_low(lock, pass + #ifdef UNIV_SYNC_DEBUG + ,file_name, line + #endif + ); + + if (state == RW_LOCK_EX) { + mutex_exit(rw_lock_get_mutex(lock)); + + return; /* Locking succeeded */ + } + + rw_x_system_call_count++; + + sync_array_reserve_cell(sync_primary_wait_array, + lock, RW_LOCK_EX, + #ifdef UNIV_SYNC_DEBUG + file_name, line, + #endif + &index); + + rw_lock_set_waiters(lock, 1); + + mutex_exit(rw_lock_get_mutex(lock)); + + if (srv_print_latch_waits) { + printf( + "Thread %lu OS wait for rw-x-lock at %lx cfile %s cline %lu\n", + os_thread_get_curr_id(), (ulint)lock, &(lock->cfile_name), + lock->cline); + } + + rw_x_system_call_count++; + + sync_array_wait_event(sync_primary_wait_array, index); + + goto lock_loop; +} + +/********************************************************************** +Acquires the debug mutex. We cannot use the mutex defined in sync0sync, +because the debug mutex is also acquired in sync0arr while holding the OS +mutex protecting the sync array, and the ordinary mutex_enter might +recursively call routines in sync0arr, leading to a deadlock on the OS +mutex. */ + +void +rw_lock_debug_mutex_enter(void) +/*==========================*/ +{ +loop: + if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { + + return; + } + + os_event_reset(rw_lock_debug_event); + + rw_lock_debug_waiters = TRUE; + + if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { + + return; + } + + os_event_wait(rw_lock_debug_event); + + goto loop; +} + +/********************************************************************** +Releases the debug mutex. 
*/ + +void +rw_lock_debug_mutex_exit(void) +/*==========================*/ +{ + mutex_exit(&rw_lock_debug_mutex); + + if (rw_lock_debug_waiters) { + rw_lock_debug_waiters = FALSE; + os_event_set(rw_lock_debug_event); + } +} + +/********************************************************************** +Inserts the debug information for an rw-lock. */ + +void +rw_lock_add_debug_info( +/*===================*/ + rw_lock_t* lock, /* in: rw-lock */ + ulint pass, /* in: pass value */ + ulint lock_type, /* in: lock type */ + char* file_name, /* in: file where requested */ + ulint line) /* in: line where requested */ +{ + rw_lock_debug_t* info; + + ut_ad(lock); + ut_ad(file_name); + + info = rw_lock_debug_create(); + + rw_lock_debug_mutex_enter(); + + info->file_name = file_name; + info->line = line; + info->lock_type = lock_type; + info->thread_id = os_thread_get_curr_id(); + info->pass = pass; + + UT_LIST_ADD_FIRST(list, lock->debug_list, info); + + rw_lock_debug_mutex_exit(); + + if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) { + sync_thread_add_level(lock, lock->level); + } +} + +/********************************************************************** +Removes a debug information struct for an rw-lock. */ + +void +rw_lock_remove_debug_info( +/*======================*/ + rw_lock_t* lock, /* in: rw-lock */ + ulint pass, /* in: pass value */ + ulint lock_type) /* in: lock type */ +{ + rw_lock_debug_t* info; + + ut_ad(lock); + + if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) { + sync_thread_reset_level(lock); + } + + rw_lock_debug_mutex_enter(); + + info = UT_LIST_GET_FIRST(lock->debug_list); + + while (info != NULL) { + if ((pass == info->pass) + && ((pass != 0) + || (info->thread_id == os_thread_get_curr_id())) + && (info->lock_type == lock_type)) { + + /* Found! */ + UT_LIST_REMOVE(list, lock->debug_list, info); + rw_lock_debug_mutex_exit(); + + rw_lock_debug_free(info); + + return; + } + + info = UT_LIST_GET_NEXT(list, info); + } + + ut_error; +} + +/********************************************************************** +Sets the rw-lock latching level field. */ + +void +rw_lock_set_level( +/*==============*/ + rw_lock_t* lock, /* in: rw-lock */ + ulint level) /* in: level */ +{ + lock->level = level; +} + +/********************************************************************** +Checks if the thread has locked the rw-lock in the specified mode, with +the pass value == 0. */ + +ibool +rw_lock_own( +/*========*/ + /* out: TRUE if locked */ + rw_lock_t* lock, /* in: rw-lock */ + ulint lock_type) /* in: lock type */ +{ + rw_lock_debug_t* info; + + ut_ad(lock); + ut_ad(rw_lock_validate(lock)); + +#ifndef UNIV_SYNC_DEBUG + ut_error; +#endif + mutex_enter(&(lock->mutex)); + + info = UT_LIST_GET_FIRST(lock->debug_list); + + while (info != NULL) { + + if ((info->thread_id == os_thread_get_curr_id()) + && (info->pass == 0) + && (info->lock_type == lock_type)) { + + mutex_exit(&(lock->mutex)); + /* Found! */ + + return(TRUE); + } + + info = UT_LIST_GET_NEXT(list, info); + } + mutex_exit(&(lock->mutex)); + + return(FALSE); +} + +/********************************************************************** +Checks if somebody has locked the rw-lock in the specified mode. 
*/ + +ibool +rw_lock_is_locked( +/*==============*/ + /* out: TRUE if locked */ + rw_lock_t* lock, /* in: rw-lock */ + ulint lock_type) /* in: lock type: RW_LOCK_SHARED, + RW_LOCK_EX */ +{ + ibool ret = FALSE; + + ut_ad(lock); + ut_ad(rw_lock_validate(lock)); + + mutex_enter(&(lock->mutex)); + + if (lock_type == RW_LOCK_SHARED) { + if (lock->reader_count > 0) { + ret = TRUE; + } + } else if (lock_type == RW_LOCK_EX) { + if (lock->writer == RW_LOCK_EX) { + ret = TRUE; + } + } else { + ut_error; + } + + mutex_exit(&(lock->mutex)); + + return(ret); +} + +/******************************************************************* +Prints debug info of currently locked rw-locks. */ + +void +rw_lock_list_print_info(void) +/*=========================*/ +{ +#ifndef UNIV_SYNC_DEBUG + printf( + "Sorry, cannot give rw-lock list info in non-debug version!\n"); +#else + rw_lock_t* lock; + ulint count = 0; + rw_lock_debug_t* info; + + mutex_enter(&rw_lock_list_mutex); + + printf("----------------------------------------------\n"); + printf("RW-LOCK INFO\n"); + + lock = UT_LIST_GET_FIRST(rw_lock_list); + + while (lock != NULL) { + + count++; + + mutex_enter(&(lock->mutex)); + + if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) + || (rw_lock_get_reader_count(lock) != 0) + || (rw_lock_get_waiters(lock) != 0)) { + + printf("RW-LOCK: %lx ", (ulint)lock); + + if (rw_lock_get_waiters(lock)) { + printf(" Waiters for the lock exist\n"); + } else { + printf("\n"); + } + + info = UT_LIST_GET_FIRST(lock->debug_list); + while (info != NULL) { + rw_lock_debug_print(info); + info = UT_LIST_GET_NEXT(list, info); + } + } + + mutex_exit(&(lock->mutex)); + lock = UT_LIST_GET_NEXT(list, lock); + } + + printf("Total number of rw-locks %ld\n", count); + mutex_exit(&rw_lock_list_mutex); +#endif +} + +/******************************************************************* +Prints debug info of an rw-lock. */ + +void +rw_lock_print( +/*==========*/ + rw_lock_t* lock) /* in: rw-lock */ +{ +#ifndef UNIV_SYNC_DEBUG + printf( + "Sorry, cannot give rw-lock info in non-debug version!\n"); +#else + ulint count = 0; + rw_lock_debug_t* info; + + printf("----------------------------------------------\n"); + printf("RW-LOCK INFO\n"); + printf("RW-LOCK: %lx ", (ulint)lock); + + mutex_enter(&(lock->mutex)); + if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) + || (rw_lock_get_reader_count(lock) != 0) + || (rw_lock_get_waiters(lock) != 0)) { + + if (rw_lock_get_waiters(lock)) { + printf(" Waiters for the lock exist\n"); + } else { + printf("\n"); + } + + info = UT_LIST_GET_FIRST(lock->debug_list); + while (info != NULL) { + rw_lock_debug_print(info); + info = UT_LIST_GET_NEXT(list, info); + } + } + + mutex_exit(&(lock->mutex)); +#endif +} + +/************************************************************************* +Prints info of a debug struct. */ + +void +rw_lock_debug_print( +/*================*/ + rw_lock_debug_t* info) /* in: debug struct */ +{ + ulint rwt; + + rwt = info->lock_type; + + printf("Locked: thread %ld file %s line %ld ", + info->thread_id, info->file_name, info->line); + if (rwt == RW_LOCK_SHARED) { + printf("S-LOCK"); + } else if (rwt == RW_LOCK_EX) { + printf("X-LOCK"); + } else if (rwt == RW_LOCK_WAIT_EX) { + printf("WAIT X-LOCK"); + } else { + ut_error; + } + if (info->pass != 0) { + printf(" pass value %lu", info->pass); + } + printf("\n"); +} + +/******************************************************************* +Returns the number of currently locked rw-locks. Works only in the debug +version. 
*/ + +ulint +rw_lock_n_locked(void) +/*==================*/ +{ +#ifndef UNIV_SYNC_DEBUG + printf( + "Sorry, cannot give rw-lock info in non-debug version!\n"); + ut_error; + return(0); +#else + rw_lock_t* lock; + ulint count = 0; + + mutex_enter(&rw_lock_list_mutex); + + lock = UT_LIST_GET_FIRST(rw_lock_list); + + while (lock != NULL) { + mutex_enter(rw_lock_get_mutex(lock)); + + if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) + || (rw_lock_get_reader_count(lock) != 0)) { + count++; + } + + mutex_exit(rw_lock_get_mutex(lock)); + lock = UT_LIST_GET_NEXT(list, lock); + } + + mutex_exit(&rw_lock_list_mutex); + + return(count); +#endif +} diff --git a/innobase/sync/sync0sync.c b/innobase/sync/sync0sync.c new file mode 100644 index 00000000000..f4ba472f6bf --- /dev/null +++ b/innobase/sync/sync0sync.c @@ -0,0 +1,1179 @@ +/****************************************************** +Mutex, the basic synchronization primitive + +(c) 1995 Innobase Oy + +Created 9/5/1995 Heikki Tuuri +*******************************************************/ + +#include "sync0sync.h" +#ifdef UNIV_NONINL +#include "sync0sync.ic" +#endif + +#include "sync0rw.h" +#include "buf0buf.h" +#include "srv0srv.h" +#include "buf0types.h" + +/* + REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX + ============================================ + +Semaphore operations in operating systems are slow: Solaris on a 1993 Sparc +takes 3 microseconds (us) for a lock-unlock pair and Windows NT on a 1995 +Pentium takes 20 microseconds for a lock-unlock pair. Therefore, we have to +implement our own efficient spin lock mutex. Future operating systems may +provide efficient spin locks, but we cannot count on that. + +Another reason for implementing a spin lock is that on multiprocessor systems +it can be more efficient for a processor to run a loop waiting for the +semaphore to be released than to switch to a different thread. A thread switch +takes 25 us on both platforms mentioned above. See Gray and Reuter's book +Transaction processing for background. + +How long should the spin loop last before suspending the thread? On a +uniprocessor, spinning does not help at all, because if the thread owning the +mutex is not executing, it cannot be released. Spinning actually wastes +resources. + +On a multiprocessor, we do not know if the thread owning the mutex is +executing or not. Thus it would make sense to spin as long as the operation +guarded by the mutex would typically last assuming that the thread is +executing. If the mutex is not released by that time, we may assume that the +thread owning the mutex is not executing and suspend the waiting thread. + +A typical operation (where no i/o involved) guarded by a mutex or a read-write +lock may last 1 - 20 us on the current Pentium platform. The longest +operations are the binary searches on an index node. + +We conclude that the best choice is to set the spin time at 20 us. Then the +system should work well on a multiprocessor. On a uniprocessor we have to +make sure that thread swithches due to mutex collisions are not frequent, +i.e., they do not happen every 100 us or so, because that wastes too much +resources. If the thread switches are not frequent, the 20 us wasted in spin +loop is not too much. + +Empirical studies on the effect of spin time should be done for different +platforms. + + + IMPLEMENTATION OF THE MUTEX + =========================== + +For background, see Curt Schimmel's book on Unix implementation on modern +architectures. 
The key points in the implementation are atomicity and +serialization of memory accesses. The test-and-set instruction (XCHG in +Pentium) must be atomic. As new processors may have weak memory models, also +serialization of memory references may be necessary. The successor of Pentium, +P6, has at least one mode where the memory model is weak. As far as we know, +in Pentium all memory accesses are serialized in the program order and we do +not have to worry about the memory model. On other processors there are +special machine instructions called a fence, memory barrier, or storage +barrier (STBAR in Sparc), which can be used to serialize the memory accesses +to happen in program order relative to the fence instruction. + +Leslie Lamport has devised a "bakery algorithm" to implement a mutex without +the atomic test-and-set, but his algorithm should be modified for weak memory +models. We do not use Lamport's algorithm, because we guess it is slower than +the atomic test-and-set. + +Our mutex implementation works as follows: After that we perform the atomic +test-and-set instruction on the memory word. If the test returns zero, we +know we got the lock first. If the test returns not zero, some other thread +was quicker and got the lock: then we spin in a loop reading the memory word, +waiting it to become zero. It is wise to just read the word in the loop, not +perform numerous test-and-set instructions, because they generate memory +traffic between the cache and the main memory. The read loop can just access +the cache, saving bus bandwidth. + +If we cannot acquire the mutex lock in the specified time, we reserve a cell +in the wait array, set the waiters byte in the mutex to 1. To avoid a race +condition, after setting the waiters byte and before suspending the waiting +thread, we still have to check that the mutex is reserved, because it may +have happened that the thread which was holding the mutex has just released +it and did not see the waiters byte set to 1, a case which would lead the +other thread to an infinite wait. + +LEMMA 1: After a thread resets the event of the cell it reserves for waiting +======== +for a mutex, some thread will eventually call sync_array_signal_object with +the mutex as an argument. Thus no infinite wait is possible. + +Proof: After making the reservation the thread sets the waiters field in the +mutex to 1. Then it checks that the mutex is still reserved by some thread, +or it reserves the mutex for itself. In any case, some thread (which may be +also some earlier thread, not necessarily the one currently holding the mutex) +will set the waiters field to 0 in mutex_exit, and then call +sync_array_signal_object with the mutex as an argument. +Q.E.D. */ + +ulint sync_dummy = 0; + +/* The number of system calls made in this module. Intended for performance +monitoring. */ + +ulint mutex_system_call_count = 0; + +/* Number of spin waits on mutexes: for performance monitoring */ + +ulint mutex_spin_round_count = 0; +ulint mutex_spin_wait_count = 0; +ulint mutex_exit_count = 0; + +/* The global array of wait cells for implementation of the database's own +mutexes and read-write locks */ +sync_array_t* sync_primary_wait_array; + +/* This variable is set to TRUE when sync_init is called */ +ibool sync_initialized = FALSE; + +/* Global list of database mutexes (not OS mutexes) created. 
*/ +UT_LIST_BASE_NODE_T(mutex_t) mutex_list; + +/* Mutex protecting the mutex_list variable */ +mutex_t mutex_list_mutex; + +typedef struct sync_level_struct sync_level_t; +typedef struct sync_thread_struct sync_thread_t; + +/* The latch levels currently owned by threads are stored in this data +structure; the size of this array is OS_THREAD_MAX_N */ + +sync_thread_t* sync_thread_level_arrays; + +/* Mutex protecting sync_thread_level_arrays */ +mutex_t sync_thread_mutex; + +/* Latching order checks start when this is set TRUE */ +ibool sync_order_checks_on = FALSE; + +/* Dummy mutex used to implement mutex_fence */ +mutex_t dummy_mutex_for_fence; + +struct sync_thread_struct{ + os_thread_id_t id; /* OS thread id */ + sync_level_t* levels; /* level array for this thread; if this is NULL + this slot is unused */ +}; + +/* Number of slots reserved for each OS thread in the sync level array */ +#define SYNC_THREAD_N_LEVELS 256 + +struct sync_level_struct{ + void* latch; /* pointer to a mutex or an rw-lock; NULL means that + the slot is empty */ + ulint level; /* level of the latch in the latching order */ +}; + +/********************************************************************** +Creates, or rather, initializes a mutex object in a specified memory +location (which must be appropriately aligned). The mutex is initialized +in the reset state. Explicit freeing of the mutex with mutex_free is +necessary only if the memory block containing it is freed. */ + +void +mutex_create_func( +/*==============*/ + mutex_t* mutex, /* in: pointer to memory */ + char* cfile_name, /* in: file name where created */ + ulint cline) /* in: file line where created */ +{ +#ifdef _WIN32 + mutex_reset_lock_word(mutex); +#else + os_fast_mutex_init(&(mutex->os_fast_mutex)); + mutex->lock_word = 0; +#endif + mutex_set_waiters(mutex, 0); + mutex->magic_n = MUTEX_MAGIC_N; + mutex->line = 0; + mutex->file_name = "FILE NOT KNOWN"; + mutex->thread_id = ULINT_UNDEFINED; + mutex->level = SYNC_LEVEL_NONE; + ut_memcpy(&(mutex->cfile_name), cfile_name, + ut_min(MUTEX_CNAME_LEN - 1, ut_strlen(cfile_name))); + mutex->cfile_name[MUTEX_CNAME_LEN - 1] = '\0'; + mutex->cline = cline; + + /* Check that lock_word is aligned; this is important on Intel */ + + ut_a(((ulint)(&(mutex->lock_word))) % 4 == 0); + + /* NOTE! The very first mutexes are not put to the mutex list */ + + if ((mutex == &mutex_list_mutex) || (mutex == &sync_thread_mutex)) { + + return; + } + + mutex_enter(&mutex_list_mutex); + + UT_LIST_ADD_FIRST(list, mutex_list, mutex); + + mutex_exit(&mutex_list_mutex); +} + +/********************************************************************** +Calling this function is obligatory only if the memory buffer containing +the mutex is freed. Removes a mutex object from the mutex list. The mutex +is checked to be in the reset state. */ + +void +mutex_free( +/*=======*/ + mutex_t* mutex) /* in: mutex */ +{ + ut_ad(mutex_validate(mutex)); + ut_a(mutex_get_lock_word(mutex) == 0); + ut_a(mutex_get_waiters(mutex) == 0); + + mutex_enter(&mutex_list_mutex); + + UT_LIST_REMOVE(list, mutex_list, mutex); + + mutex_exit(&mutex_list_mutex); + +#ifndef _WIN32 + os_fast_mutex_free(&(mutex->os_fast_mutex)); +#endif + /* If we free the mutex protecting the mutex list (freeing is + not necessary), we have to reset the magic number AFTER removing + it from the list. */ + + mutex->magic_n = 0; +} + +/************************************************************************ +Tries to lock the mutex for the current thread. 
If the lock is not acquired +immediately, returns with return value 1. */ + +ulint +mutex_enter_nowait( +/*===============*/ + /* out: 0 if succeed, 1 if not */ + mutex_t* mutex) /* in: pointer to mutex */ +{ + ut_ad(mutex_validate(mutex)); + + if (!mutex_test_and_set(mutex)) { + + #ifdef UNIV_SYNC_DEBUG + mutex_set_debug_info(mutex, __FILE__, __LINE__); + #endif + + return(0); /* Succeeded! */ + } + + return(1); +} + +/********************************************************************** +Checks that the mutex has been initialized. */ + +ibool +mutex_validate( +/*===========*/ + mutex_t* mutex) +{ + ut_a(mutex); + ut_a(mutex->magic_n == MUTEX_MAGIC_N); + + return(TRUE); +} + +/********************************************************************** +Sets the waiters field in a mutex. */ + +void +mutex_set_waiters( +/*==============*/ + mutex_t* mutex, /* in: mutex */ + ulint n) /* in: value to set */ +{ +volatile ulint* ptr; /* declared volatile to ensure that + the value is stored to memory */ + ut_ad(mutex); + + ptr = &(mutex->waiters); + + *ptr = n; /* Here we assume that the write of a single + word in memory is atomic */ +} + +/********************************************************************** +Reserves a mutex for the current thread. If the mutex is reserved, the +function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting +for the mutex before suspending the thread. */ + +void +mutex_spin_wait( +/*============*/ + mutex_t* mutex /* in: pointer to mutex */ + + #ifdef UNIV_SYNC_DEBUG + ,char* file_name, /* in: file name where mutex requested */ + ulint line /* in: line where requested */ + #endif +) +{ + ulint index; /* index of the reserved wait cell */ + ulint i; /* spin round count */ + + ut_ad(mutex); + +mutex_loop: + + i = 0; + + /* Spin waiting for the lock word to become zero. Note that we do not + have to assume that the read access to the lock word is atomic, as the + actual locking is always committed with atomic test-and-set. In + reality, however, all processors probably have an atomic read of a + memory word. */ + +spin_loop: + mutex_spin_wait_count++; + + while (mutex_get_lock_word(mutex) != 0 && i < SYNC_SPIN_ROUNDS) { + + if (srv_spin_wait_delay) { + ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); + } + + i++; + } + + if (i == SYNC_SPIN_ROUNDS) { + os_thread_yield(); + } + + if (srv_print_latch_waits) { + printf( + "Thread %lu spin wait mutex at %lx cfile %s cline %lu rnds %lu\n", + os_thread_get_curr_id(), (ulint)mutex, &(mutex->cfile_name), + mutex->cline, i); + } + + mutex_spin_round_count += i; + + if (mutex_test_and_set(mutex) == 0) { + /* Succeeded! */ + + #ifdef UNIV_SYNC_DEBUG + mutex_set_debug_info(mutex, file_name, line); + #endif + + return; + } + + if (i < SYNC_SPIN_ROUNDS) { + + goto spin_loop; + } + + sync_array_reserve_cell(sync_primary_wait_array, mutex, + SYNC_MUTEX, + #ifdef UNIV_SYNC_DEBUG + file_name, line, + #endif + &index); + + mutex_system_call_count++; + + /* The memory order of the array reservation and the change in the + waiters field is important: when we suspend a thread, we first + reserve the cell and then set waiters field to 1. When threads are + released in mutex_exit, the waiters field is first set to zero and + then the event is set to the signaled state. */ + + mutex_set_waiters(mutex, 1); + + if (mutex_test_and_set(mutex) == 0) { + + /* Succeeded! 
Free the reserved wait cell */ + + sync_array_free_cell(sync_primary_wait_array, index); + + #ifdef UNIV_SYNC_DEBUG + mutex_set_debug_info(mutex, file_name, line); + #endif + + if (srv_print_latch_waits) { + printf( + "Thread %lu spin wait succeeds at 2: mutex at %lx\n", + os_thread_get_curr_id(), (ulint)mutex); + } + + return; + + /* Note that in this case we leave the waiters field + set to 1. We cannot reset it to zero, as we do not know + if there are other waiters. */ + } + + /* Now we know that there has been some thread holding the mutex + after the change in the wait array and the waiters field was made. + Now there is no risk of infinite wait on the event. */ + + if (srv_print_latch_waits) { + printf( + "Thread %lu OS wait mutex at %lx cfile %s cline %lu rnds %lu\n", + os_thread_get_curr_id(), (ulint)mutex, &(mutex->cfile_name), + mutex->cline, i); + } + + mutex_system_call_count++; + sync_array_wait_event(sync_primary_wait_array, index); + + goto mutex_loop; +} + +/********************************************************************** +Releases the threads waiting in the primary wait array for this mutex. */ + +void +mutex_signal_object( +/*================*/ + mutex_t* mutex) /* in: mutex */ +{ + mutex_set_waiters(mutex, 0); + + /* The memory order of resetting the waiters field and + signaling the object is important. See LEMMA 1 above. */ + + sync_array_signal_object(sync_primary_wait_array, mutex); +} + +/********************************************************************** +Sets the debug information for a reserved mutex. */ + +void +mutex_set_debug_info( +/*=================*/ + mutex_t* mutex, /* in: mutex */ + char* file_name, /* in: file where requested */ + ulint line) /* in: line where requested */ +{ + ut_ad(mutex); + ut_ad(file_name); + + sync_thread_add_level(mutex, mutex->level); + + mutex->file_name = file_name; + mutex->line = line; + mutex->thread_id = os_thread_get_curr_id(); +} + +/********************************************************************** +Gets the debug information for a reserved mutex. */ + +void +mutex_get_debug_info( +/*=================*/ + mutex_t* mutex, /* in: mutex */ + char** file_name, /* out: file where requested */ + ulint* line, /* out: line where requested */ + os_thread_id_t* thread_id) /* out: id of the thread which owns + the mutex */ +{ + ut_ad(mutex); + + *file_name = mutex->file_name; + *line = mutex->line; + *thread_id = mutex->thread_id; +} + +/********************************************************************** +Sets the mutex latching level field. */ + +void +mutex_set_level( +/*============*/ + mutex_t* mutex, /* in: mutex */ + ulint level) /* in: level */ +{ + mutex->level = level; +} + +/********************************************************************** +Checks that the current thread owns the mutex. Works only in the debug +version. */ + +ibool +mutex_own( +/*======*/ + /* out: TRUE if owns */ + mutex_t* mutex) /* in: mutex */ +{ + ut_a(mutex_validate(mutex)); + + if (mutex_get_lock_word(mutex) != 1) { + + return(FALSE); + } + + if (mutex->thread_id != os_thread_get_curr_id()) { + + return(FALSE); + } + + return(TRUE); +} + +/********************************************************************** +Prints debug info of currently reserved mutexes. 
*/ + +void +mutex_list_print_info(void) +/*=======================*/ +{ +#ifndef UNIV_SYNC_DEBUG + printf("Sorry, cannot give mutex list info in non-debug version!\n"); +#else + mutex_t* mutex; + char* file_name; + ulint line; + os_thread_id_t thread_id; + ulint count = 0; + + printf("-----------------------------------------------\n"); + printf("MUTEX INFO\n"); + + mutex_enter(&mutex_list_mutex); + + mutex = UT_LIST_GET_FIRST(mutex_list); + + while (mutex != NULL) { + count++; + + if (mutex_get_lock_word(mutex) != 0) { + + mutex_get_debug_info(mutex, &file_name, &line, &thread_id); + + printf("Locked mutex: addr %lx thread %ld file %s line %ld\n", + (ulint)mutex, thread_id, file_name, line); + } + + mutex = UT_LIST_GET_NEXT(list, mutex); + } + + printf("Total number of mutexes %ld\n", count); + + mutex_exit(&mutex_list_mutex); +#endif +} + +/********************************************************************** +Counts currently reserved mutexes. Works only in the debug version. */ + +ulint +mutex_n_reserved(void) +/*==================*/ +{ +#ifndef UNIV_SYNC_DEBUG + printf("Sorry, cannot give mutex info in non-debug version!\n"); + ut_error; + + return(0); +#else + mutex_t* mutex; + ulint count = 0; + + mutex_enter(&mutex_list_mutex); + + mutex = UT_LIST_GET_FIRST(mutex_list); + + while (mutex != NULL) { + if (mutex_get_lock_word(mutex) != 0) { + + count++; + } + + mutex = UT_LIST_GET_NEXT(list, mutex); + } + + mutex_exit(&mutex_list_mutex); + + ut_a(count >= 1); + + return(count - 1); /* Subtract one, because this function itself + was holding one mutex (mutex_list_mutex) */ +#endif +} + +/********************************************************************** +Returns TRUE if no mutex or rw-lock is currently locked. Works only in +the debug version. */ + +ibool +sync_all_freed(void) +/*================*/ +{ + #ifdef UNIV_SYNC_DEBUG + if (mutex_n_reserved() + rw_lock_n_locked() == 0) { + + return(TRUE); + } else { + return(FALSE); + } + #else + ut_error; + + return(FALSE); + #endif +} + +/********************************************************************** +Gets the value in the nth slot in the thread level arrays. */ +static +sync_thread_t* +sync_thread_level_arrays_get_nth( +/*=============================*/ + /* out: pointer to thread slot */ + ulint n) /* in: slot number */ +{ + ut_ad(n < OS_THREAD_MAX_N); + + return(sync_thread_level_arrays + n); +} + +/********************************************************************** +Looks for the thread slot for the calling thread. */ +static +sync_thread_t* +sync_thread_level_arrays_find_slot(void) +/*====================================*/ + /* out: pointer to thread slot, NULL if not found */ + +{ + sync_thread_t* slot; + os_thread_id_t id; + ulint i; + + id = os_thread_get_curr_id(); + + for (i = 0; i < OS_THREAD_MAX_N; i++) { + + slot = sync_thread_level_arrays_get_nth(i); + + if (slot->levels && (slot->id == id)) { + + return(slot); + } + } + + return(NULL); +} + +/********************************************************************** +Looks for an unused thread slot. 
*/ +static +sync_thread_t* +sync_thread_level_arrays_find_free(void) +/*====================================*/ + /* out: pointer to thread slot */ + +{ + sync_thread_t* slot; + ulint i; + + for (i = 0; i < OS_THREAD_MAX_N; i++) { + + slot = sync_thread_level_arrays_get_nth(i); + + if (slot->levels == NULL) { + + return(slot); + } + } + + return(NULL); +} + +/********************************************************************** +Gets the value in the nth slot in the thread level array. */ +static +sync_level_t* +sync_thread_levels_get_nth( +/*=======================*/ + /* out: pointer to level slot */ + sync_level_t* arr, /* in: pointer to level array for an OS + thread */ + ulint n) /* in: slot number */ +{ + ut_ad(n < SYNC_THREAD_N_LEVELS); + + return(arr + n); +} + +/********************************************************************** +Checks if all the level values stored in the level array are greater than +the given limit. */ +static +ibool +sync_thread_levels_g( +/*=================*/ + /* out: TRUE if all greater */ + sync_level_t* arr, /* in: pointer to level array for an OS + thread */ + ulint limit) /* in: level limit */ +{ + sync_level_t* slot; + rw_lock_t* lock; + mutex_t* mutex; + ulint i; + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(arr, i); + + if (slot->latch != NULL) { + if (slot->level <= limit) { + + lock = slot->latch; + mutex = slot->latch; + + ut_error; + + return(FALSE); + } + } + } + + return(TRUE); +} + +/********************************************************************** +Checks if the level value is stored in the level array. */ +static +ibool +sync_thread_levels_contain( +/*=======================*/ + /* out: TRUE if stored */ + sync_level_t* arr, /* in: pointer to level array for an OS + thread */ + ulint level) /* in: level */ +{ + sync_level_t* slot; + ulint i; + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(arr, i); + + if (slot->latch != NULL) { + if (slot->level == level) { + + return(TRUE); + } + } + } + + return(FALSE); +} + +/********************************************************************** +Checks that the level array for the current thread is empty. */ + +ibool +sync_thread_levels_empty_gen( +/*=========================*/ + /* out: TRUE if empty except the + exceptions specified below */ + ibool dict_mutex_allowed) /* in: TRUE if dictionary mutex is + allowed to be owned by the thread, + also purge_is_running mutex is + allowed */ +{ + sync_level_t* arr; + sync_thread_t* thread_slot; + sync_level_t* slot; + rw_lock_t* lock; + mutex_t* mutex; + ulint i; + + if (!sync_order_checks_on) { + + return(TRUE); + } + + mutex_enter(&sync_thread_mutex); + + thread_slot = sync_thread_level_arrays_find_slot(); + + if (thread_slot == NULL) { + + mutex_exit(&sync_thread_mutex); + + return(TRUE); + } + + arr = thread_slot->levels; + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(arr, i); + + if (slot->latch != NULL && (!dict_mutex_allowed || + (slot->level != SYNC_DICT + && slot->level != SYNC_PURGE_IS_RUNNING))) { + + lock = slot->latch; + mutex = slot->latch; + mutex_exit(&sync_thread_mutex); + + sync_print(); + ut_error; + + return(FALSE); + } + } + + mutex_exit(&sync_thread_mutex); + + return(TRUE); +} + +/********************************************************************** +Checks that the level array for the current thread is empty. 
*/ + +ibool +sync_thread_levels_empty(void) +/*==========================*/ + /* out: TRUE if empty */ +{ + return(sync_thread_levels_empty_gen(FALSE)); +} + +/********************************************************************** +Adds a latch and its level in the thread level array. Allocates the memory +for the array if called first time for this OS thread. Makes the checks +against other latch levels stored in the array for this thread. */ + +void +sync_thread_add_level( +/*==================*/ + void* latch, /* in: pointer to a mutex or an rw-lock */ + ulint level) /* in: level in the latching order; if SYNC_LEVEL_NONE, + nothing is done */ +{ + sync_level_t* array; + sync_level_t* slot; + sync_thread_t* thread_slot; + ulint i; + + if (!sync_order_checks_on) { + + return; + } + + if ((latch == (void*)&sync_thread_mutex) + || (latch == (void*)&mutex_list_mutex) + || (latch == (void*)&rw_lock_debug_mutex) + || (latch == (void*)&rw_lock_list_mutex)) { + + return; + } + + if (level == SYNC_LEVEL_NONE) { + + return; + } + + mutex_enter(&sync_thread_mutex); + + thread_slot = sync_thread_level_arrays_find_slot(); + + if (thread_slot == NULL) { + /* We have to allocate the level array for a new thread */ + array = ut_malloc(sizeof(sync_level_t) * SYNC_THREAD_N_LEVELS); + + thread_slot = sync_thread_level_arrays_find_free(); + + thread_slot->id = os_thread_get_curr_id(); + thread_slot->levels = array; + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(array, i); + + slot->latch = NULL; + } + } + + array = thread_slot->levels; + + /* NOTE that there is a problem with _NODE and _LEAF levels: if the + B-tree height changes, then a leaf can change to an internal node + or the other way around. We do not know at present if this can cause + unnecessary assertion failures below. 
*/ + + if (level == SYNC_NO_ORDER_CHECK) { + /* Do no order checking */ + + } else if (level == SYNC_MEM_POOL) { + ut_a(sync_thread_levels_g(array, SYNC_MEM_POOL)); + } else if (level == SYNC_MEM_HASH) { + ut_a(sync_thread_levels_g(array, SYNC_MEM_HASH)); + } else if (level == SYNC_RECV) { + ut_a(sync_thread_levels_g(array, SYNC_RECV)); + } else if (level == SYNC_LOG) { + ut_a(sync_thread_levels_g(array, SYNC_LOG)); + } else if (level == SYNC_ANY_LATCH) { + ut_a(sync_thread_levels_g(array, SYNC_ANY_LATCH)); + } else if (level == SYNC_TRX_SYS_HEADER) { + ut_a(sync_thread_levels_contain(array, SYNC_KERNEL)); + } else if (level == SYNC_BUF_BLOCK) { + ut_a((sync_thread_levels_contain(array, SYNC_BUF_POOL) + && sync_thread_levels_g(array, SYNC_BUF_BLOCK - 1)) + || sync_thread_levels_g(array, SYNC_BUF_BLOCK)); + } else if (level == SYNC_BUF_POOL) { + ut_a(sync_thread_levels_g(array, SYNC_BUF_POOL)); + } else if (level == SYNC_SEARCH_SYS) { + ut_a(sync_thread_levels_g(array, SYNC_SEARCH_SYS)); + } else if (level == SYNC_TRX_LOCK_HEAP) { + ut_a(sync_thread_levels_g(array, SYNC_TRX_LOCK_HEAP)); + } else if (level == SYNC_REC_LOCK) { + ut_a((sync_thread_levels_contain(array, SYNC_KERNEL) + && sync_thread_levels_g(array, SYNC_REC_LOCK - 1)) + || sync_thread_levels_g(array, SYNC_REC_LOCK)); + } else if (level == SYNC_KERNEL) { + ut_a(sync_thread_levels_g(array, SYNC_KERNEL)); + } else if (level == SYNC_IBUF_BITMAP) { + ut_a((sync_thread_levels_contain(array, SYNC_IBUF_BITMAP_MUTEX) + && sync_thread_levels_g(array, SYNC_IBUF_BITMAP - 1)) + || sync_thread_levels_g(array, SYNC_IBUF_BITMAP)); + } else if (level == SYNC_IBUF_BITMAP_MUTEX) { + ut_a(sync_thread_levels_g(array, SYNC_IBUF_BITMAP_MUTEX)); + } else if (level == SYNC_FSP_PAGE) { + ut_a(sync_thread_levels_contain(array, SYNC_FSP)); + } else if (level == SYNC_FSP) { + ut_a(sync_thread_levels_contain(array, SYNC_FSP) + || sync_thread_levels_g(array, SYNC_FSP)); + } else if (level == SYNC_TRX_UNDO_PAGE) { + ut_a(sync_thread_levels_contain(array, SYNC_TRX_UNDO) + || sync_thread_levels_contain(array, SYNC_RSEG) + || sync_thread_levels_contain(array, SYNC_PURGE_SYS) + || sync_thread_levels_g(array, SYNC_TRX_UNDO_PAGE)); + } else if (level == SYNC_RSEG_HEADER) { + ut_a(sync_thread_levels_contain(array, SYNC_RSEG)); + } else if (level == SYNC_RSEG_HEADER_NEW) { + ut_a(sync_thread_levels_contain(array, SYNC_KERNEL) + && sync_thread_levels_contain(array, SYNC_FSP_PAGE)); + } else if (level == SYNC_RSEG) { + ut_a(sync_thread_levels_g(array, SYNC_RSEG)); + } else if (level == SYNC_TRX_UNDO) { + ut_a(sync_thread_levels_g(array, SYNC_TRX_UNDO)); + } else if (level == SYNC_PURGE_LATCH) { + ut_a(sync_thread_levels_g(array, SYNC_PURGE_LATCH)); + } else if (level == SYNC_PURGE_SYS) { + ut_a(sync_thread_levels_g(array, SYNC_PURGE_SYS)); + } else if (level == SYNC_TREE_NODE) { + ut_a(sync_thread_levels_contain(array, SYNC_INDEX_TREE) + || sync_thread_levels_g(array, SYNC_TREE_NODE - 1)); + } else if (level == SYNC_TREE_NODE_FROM_HASH) { + ut_a(1); + } else if (level == SYNC_TREE_NODE_NEW) { + ut_a(sync_thread_levels_contain(array, SYNC_FSP_PAGE) + || sync_thread_levels_contain(array, SYNC_IBUF_MUTEX)); + } else if (level == SYNC_INDEX_TREE) { + ut_a((sync_thread_levels_contain(array, SYNC_IBUF_MUTEX) + && sync_thread_levels_contain(array, SYNC_FSP) + && sync_thread_levels_g(array, SYNC_FSP_PAGE - 1)) + || sync_thread_levels_g(array, SYNC_TREE_NODE - 1)); + } else if (level == SYNC_IBUF_MUTEX) { + ut_a(sync_thread_levels_g(array, SYNC_FSP_PAGE - 1)); + } else if 
(level == SYNC_IBUF_PESS_INSERT_MUTEX) { + ut_a(sync_thread_levels_g(array, SYNC_FSP - 1) + && !sync_thread_levels_contain(array, SYNC_IBUF_MUTEX)); + } else if (level == SYNC_IBUF_HEADER) { + ut_a(sync_thread_levels_g(array, SYNC_FSP - 1) + && !sync_thread_levels_contain(array, SYNC_IBUF_MUTEX) + && !sync_thread_levels_contain(array, + SYNC_IBUF_PESS_INSERT_MUTEX)); + } else if (level == SYNC_DICT_HEADER) { + ut_a(sync_thread_levels_g(array, SYNC_DICT_HEADER)); + } else if (level == SYNC_PURGE_IS_RUNNING) { + ut_a(sync_thread_levels_g(array, SYNC_PURGE_IS_RUNNING)); + } else if (level == SYNC_DICT) { + ut_a(buf_debug_prints + || sync_thread_levels_g(array, SYNC_DICT)); + } else { + ut_error; + } + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(array, i); + + if (slot->latch == NULL) { + slot->latch = latch; + slot->level = level; + + break; + } + } + + ut_a(i < SYNC_THREAD_N_LEVELS); + + mutex_exit(&sync_thread_mutex); +} + +/********************************************************************** +Removes a latch from the thread level array if it is found there. */ + +ibool +sync_thread_reset_level( +/*====================*/ + /* out: TRUE if found from the array; it is an error + if the latch is not found */ + void* latch) /* in: pointer to a mutex or an rw-lock */ +{ + sync_level_t* array; + sync_level_t* slot; + sync_thread_t* thread_slot; + ulint i; + + if (!sync_order_checks_on) { + + return(FALSE); + } + + if ((latch == (void*)&sync_thread_mutex) + || (latch == (void*)&mutex_list_mutex) + || (latch == (void*)&rw_lock_debug_mutex) + || (latch == (void*)&rw_lock_list_mutex)) { + + return(FALSE); + } + + mutex_enter(&sync_thread_mutex); + + thread_slot = sync_thread_level_arrays_find_slot(); + + if (thread_slot == NULL) { + + ut_error; + + mutex_exit(&sync_thread_mutex); + return(FALSE); + } + + array = thread_slot->levels; + + for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { + + slot = sync_thread_levels_get_nth(array, i); + + if (slot->latch == latch) { + slot->latch = NULL; + + mutex_exit(&sync_thread_mutex); + + return(TRUE); + } + } + + ut_error; + + mutex_exit(&sync_thread_mutex); + + return(FALSE); +} + +/********************************************************************** +Initializes the synchronization data structures. */ + +void +sync_init(void) +/*===========*/ +{ + sync_thread_t* thread_slot; + ulint i; + + ut_a(sync_initialized == FALSE); + + sync_initialized = TRUE; + + /* Create the primary system wait array which is protected by an OS + mutex */ + + sync_primary_wait_array = sync_array_create(OS_THREAD_MAX_N, + SYNC_ARRAY_OS_MUTEX); + + /* Create the thread latch level array where the latch levels + are stored for each OS thread */ + + sync_thread_level_arrays = ut_malloc(OS_THREAD_MAX_N + * sizeof(sync_thread_t)); + for (i = 0; i < OS_THREAD_MAX_N; i++) { + + thread_slot = sync_thread_level_arrays_get_nth(i); + thread_slot->levels = NULL; + } + + /* Init the mutex list and create the mutex to protect it. */ + + UT_LIST_INIT(mutex_list); + mutex_create(&mutex_list_mutex); + mutex_set_level(&mutex_list_mutex, SYNC_NO_ORDER_CHECK); + + mutex_create(&sync_thread_mutex); + mutex_set_level(&sync_thread_mutex, SYNC_NO_ORDER_CHECK); + + /* Init the rw-lock list and create the mutex to protect it. 
*/ + + UT_LIST_INIT(rw_lock_list); + mutex_create(&rw_lock_list_mutex); + mutex_set_level(&rw_lock_list_mutex, SYNC_NO_ORDER_CHECK); + + mutex_create(&rw_lock_debug_mutex); + mutex_set_level(&rw_lock_debug_mutex, SYNC_NO_ORDER_CHECK); + + rw_lock_debug_event = os_event_create(NULL); + rw_lock_debug_waiters = FALSE; +} + +/********************************************************************** +Frees the resources in synchronization data structures. */ + +void +sync_close(void) +/*===========*/ +{ + sync_array_free(sync_primary_wait_array); +} + +/*********************************************************************** +Prints wait info of the sync system. */ + +void +sync_print_wait_info(void) +/*======================*/ +{ + printf( + "Mut ex %lu sp %lu r %lu sys %lu; rws %lu %lu %lu; rwx %lu %lu %lu\n", + mutex_exit_count, + mutex_spin_wait_count, mutex_spin_round_count, + mutex_system_call_count, + rw_s_exit_count, + rw_s_spin_wait_count, rw_s_system_call_count, + rw_x_exit_count, + rw_x_spin_wait_count, rw_x_system_call_count); +} + +/*********************************************************************** +Prints info of the sync system. */ + +void +sync_print(void) +/*============*/ +{ + printf("SYNC INFO:------------------------------------------\n"); + mutex_list_print_info(); + rw_lock_list_print_info(); + sync_array_print_info(sync_primary_wait_array); + sync_print_wait_info(); + printf("----------------------------------------------------\n"); +} diff --git a/innobase/sync/ts/makefile b/innobase/sync/ts/makefile new file mode 100644 index 00000000000..95011f51466 --- /dev/null +++ b/innobase/sync/ts/makefile @@ -0,0 +1,14 @@ + + + +include ..\..\makefile.i + +tssync: ..\sync.lib tssync.c makefile + $(CCOM) $(CFL) -I.. -I..\.. ..\sync.lib ..\..\mach.lib ..\..\ut.lib ..\..\mem.lib ..\..\os.lib tssync.c $(LFL) + + + + + + + diff --git a/innobase/sync/ts/tssync.c b/innobase/sync/ts/tssync.c new file mode 100644 index 00000000000..bf30a603284 --- /dev/null +++ b/innobase/sync/ts/tssync.c @@ -0,0 +1,1366 @@ +/************************************************************************ +The test module for the syncronization primitives + +(c) 1995 Innobase Oy + +Created 9/9/1995 Heikki Tuuri +*************************************************************************/ + + +#include "../sync0sync.h" +#include "../sync0rw.h" +#include "../sync0arr.h" +#include "../sync0ipm.h" +#include "ut0ut.h" +#include "mem0mem.h" +#include "os0sync.h" +#include "os0thread.h" +#include "os0sync.h" + +mutex_t mutex; +mutex_t mutex1; +mutex_t mutex2; +mutex_t mutex3; +mutex_t mutex4; + +ip_mutex_t ip_mutex; + +ip_mutex_t ip_mutex1; +ip_mutex_t ip_mutex2; +ip_mutex_t ip_mutex3; +ip_mutex_t ip_mutex4; + +ip_mutex_hdl_t* iph; + +ip_mutex_hdl_t* iph1; +ip_mutex_hdl_t* iph2; +ip_mutex_hdl_t* iph3; +ip_mutex_hdl_t* iph4; + + +rw_lock_t rw1; +rw_lock_t rw2; +rw_lock_t rw3; +rw_lock_t rw4; + +rw_lock_t rw9; +rw_lock_t rw10; +mutex_t mutex9; + +os_mutex_t osm; + +ulint last_thr; +ulint switch_count; +ulint glob_count; +ulint glob_inc; +ulint rc; + +bool qprint = FALSE; + +/******************************************************************** +Start function for thread 1 in test1. 
*/ +ulint +thread1(void* arg) +/*==============*/ +{ + ulint i, j; + void* arg2; + + arg2 = arg; + + printf("Thread1 started!\n"); + + mutex_enter(&mutex); + + printf("Thread1 owns now the mutex!\n"); + + j = 0; + + for (i = 1; i < 1000000; i++) { + j += i; + } + + printf("Thread1 releases now the mutex!\n"); + + mutex_exit(&mutex); + + return(j); +} + +/******************************************************************** +Start function for thread 2 in test1. */ +ulint +thread2(void* arg) +/*==============*/ +{ + ulint i, j; + void* arg2; + + arg2 = arg; + + printf("Thread2 started!\n"); + + mutex_enter(&mutex); + + printf("Thread2 owns now the mutex!\n"); + + j = 0; + + for (i = 1; i < 1000000; i++) { + j += i; + } + + printf("Thread2 releases now the mutex!\n"); + + mutex_exit(&mutex); + + return(j); +} + +/******************************************************************** +Start function for the competing threads in test2. The function tests +the behavior lock-coupling through 4 mutexes. */ + +ulint +thread_n(volatile void* arg) +/*========================*/ +{ + ulint i, j, k, n; + + n = *((ulint*)arg); + + printf("Thread %ld started!\n", n); + + for (k = 0; k < 2000 * UNIV_DBC; k++) { + + mutex_enter(&mutex1); + + if (last_thr != n) { + switch_count++; + last_thr = n; + } + + j = 0; + + for (i = 1; i < 400; i++) { + j += i; + } + + mutex_enter(&mutex2); + + mutex_exit(&mutex1); + + for (i = 1; i < 400; i++) { + j += i; + } + mutex_enter(&mutex3); + + mutex_exit(&mutex2); + + for (i = 1; i < 400; i++) { + j += i; + } + mutex_enter(&mutex4); + + mutex_exit(&mutex3); + + for (i = 1; i < 400; i++) { + j += i; + } + + mutex_exit(&mutex4); + } + + printf("Thread %ld exits!\n", n); + + return(j); +} + +/******************************************************************** +Start function for mutex exclusion checking in test3. */ + +ulint +thread_x(void* arg) +/*===============*/ +{ + ulint k; + void* arg2; + + arg2 = arg; + + printf("Starting thread!\n"); + + for (k = 0; k < 200000 * UNIV_DBC; k++) { + + mutex_enter(&mutex); + + glob_count += glob_inc; + + mutex_exit(&mutex); + + } + + printf("Exiting thread!\n"); + + return(0); +} + + + +void +test1(void) +/*=======*/ +{ + os_thread_t thr1, thr2; + os_thread_id_t id1, id2; + ulint i, j; + ulint tm, oldtm; + ulint* lp; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 1. 
Test of mutexes.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + osm = os_mutex_create(NULL); + + os_mutex_enter(osm); + os_mutex_exit(osm); + + os_mutex_free(osm); + + + mutex_create(&mutex); + + lp = &j; + + oldtm = ut_clock(); + + for (i = 0; i < 1000000; i++) { + id1 = os_thread_get_curr_id(); + } + + tm = ut_clock(); + printf("Wall clock time for %ld thread_get_id %ld milliseconds\n", + i, tm - oldtm); + + + oldtm = ut_clock(); + + for (i = 0; i < 100000 * UNIV_DBC; i++) { + + mutex_enter(&mutex); + mutex_exit(&mutex); + } + + tm = ut_clock(); + printf("Wall clock time for %ld mutex lock-unlock %ld milliseconds\n", + i, tm - oldtm); + + oldtm = ut_clock(); + + for (i = 0; i < 1000000; i++) { + + mutex_fence(); + } + + tm = ut_clock(); + printf("Wall clock time for %ld fences %ld milliseconds\n", + i, tm - oldtm); + + mutex_enter(&mutex); + + mutex_list_print_info(); + + ut_ad(1 == mutex_n_reserved()); + ut_ad(FALSE == sync_all_freed()); + + thr1 = os_thread_create(thread1, + NULL, + &id1); + + printf("Thread1 created, id %ld \n", id1); + + thr2 = os_thread_create(thread2, + NULL, + &id2); + + printf("Thread2 created, id %ld \n", id2); + + + j = 0; + + for (i = 1; i < 20000000; i++) { + j += i; + } + + sync_print(); + + sync_array_validate(sync_primary_wait_array); + + printf("Main thread releases now mutex!\n"); + + mutex_exit(&mutex); + + os_thread_wait(thr2); + + os_thread_wait(thr1); +} + +/****************************************************************** +Test function for possible convoy problem. */ + +void +test2(void) +/*=======*/ +{ + os_thread_t thr1, thr2, thr3, thr4, thr5; + os_thread_id_t id1, id2, id3, id4, id5; + ulint tm, oldtm; + ulint n1, n2, n3, n4, n5; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 2. Test of possible convoy problem.\n"); + + printf("System call count %lu\n", mutex_system_call_count); + + mutex_create(&mutex1); + mutex_create(&mutex2); + mutex_create(&mutex3); + mutex_create(&mutex4); + + switch_count = 0; + + oldtm = ut_clock(); + + n1 = 1; + + thr1 = os_thread_create(thread_n, + &n1, + &id1); + + os_thread_wait(thr1); + + + tm = ut_clock(); + printf("Wall clock time for single thread %ld milliseconds\n", + tm - oldtm); + printf("System call count %lu\n", mutex_system_call_count); + + switch_count = 0; + + oldtm = ut_clock(); + + n1 = 1; + thr1 = os_thread_create(thread_n, + &n1, + &id1); + n2 = 2; + thr2 = os_thread_create(thread_n, + &n2, + &id2); + n3 = 3; + thr3 = os_thread_create(thread_n, + &n3, + &id3); + n4 = 4; + thr4 = os_thread_create(thread_n, + &n4, + &id4); + n5 = 5; + thr5 = os_thread_create(thread_n, + &n5, + &id5); + + + os_thread_wait(thr1); + os_thread_wait(thr2); + os_thread_wait(thr3); + os_thread_wait(thr4); + os_thread_wait(thr5); + + + tm = ut_clock(); + printf("Wall clock time for 5 threads %ld milliseconds\n", + tm - oldtm); + printf("%ld thread switches occurred\n", switch_count); + + printf("If this is not 5 x single thread time, possibly convoy!\n"); + + printf("System call count %lu\n", mutex_system_call_count); +} + +/****************************************************************** +Test function for possible exclusion failure. */ + +void +test3(void) +/*=======*/ +{ + os_thread_t thr1, thr2; + os_thread_id_t id1, id2; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 3. 
Test of possible exclusion failure.\n"); + + glob_count = 0; + glob_inc = 1; + + thr1 = os_thread_create(thread_x, + NULL, + &id1); + thr2 = os_thread_create(thread_x, + NULL, + &id2); + + os_thread_wait(thr2); + os_thread_wait(thr1); + + ut_a(glob_count == 400000 * UNIV_DBC); +} + +/****************************************************************** +Test function for measuring the spin wait loop cycle time. */ + +void +test4(void) +/*=======*/ +{ +volatile ulint* ptr; + ulint i, tm, oldtm; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 4. Test of spin wait loop cycle time.\n"); + printf("Use this time to set the SYNC_SPIN_ROUNDS constant.\n"); + + + glob_inc = 1; + + ptr = &glob_inc; + + oldtm = ut_clock(); + + i = 0; + + while ((*ptr != 0) && (i < 10000000)) { + i++; + } + + tm = ut_clock(); + printf("Wall clock time for %ld cycles %ld milliseconds\n", + i, tm - oldtm); +} + +/******************************************************************** +Start function for s-lock thread in test5. */ +ulint +thread_srw(void* arg) +/*==============*/ +{ + ulint i, j; + void* arg2; + + arg2 = arg; + + printf("Thread_srw started!\n"); + + rw_lock_s_lock(&rw1); + + printf("Thread_srw has now s-lock!\n"); + + j = 0; + + for (i = 1; i < 1000000; i++) { + j += i; + } + + printf("Thread_srw releases now the s-lock!\n"); + + rw_lock_s_unlock(&rw1); + + return(j); +} + +/******************************************************************** +Start function for x-lock thread in test5. */ +ulint +thread_xrw(void* arg) +/*==============*/ +{ + ulint i, j; + void* arg2; + + arg2 = arg; + + printf("Thread_xrw started!\n"); + + rw_lock_x_lock(&rw1); + + printf("Thread_xrw has now x-lock!\n"); + + j = 0; + + for (i = 1; i < 1000000; i++) { + j += i; + } + + printf("Thread_xrw releases now the x-lock!\n"); + + rw_lock_x_unlock(&rw1); + + return(j); +} + + +void +test5(void) +/*=======*/ +{ + os_thread_t thr1, thr2; + os_thread_id_t id1, id2; + ulint i, j; + ulint tm, oldtm; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 5. 
Test of read-write locks.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + + rw_lock_create(&rw1); + + oldtm = ut_clock(); + + + for (i = 0; i < 10000 * UNIV_DBC * UNIV_DBC; i++) { + + rw_lock_s_lock(&rw1); + + rw_lock_s_unlock(&rw1); + + } + + tm = ut_clock(); + printf("Wall clock time for %ld rw s-lock-unlock %ld milliseconds\n", + i, tm - oldtm); + + + oldtm = ut_clock(); + + + for (i = 0; i < 10000 * UNIV_DBC * UNIV_DBC; i++) { + + mutex_enter(&mutex); + rc++; + mutex_exit(&mutex); + + mutex_enter(&mutex); + rc--; + mutex_exit(&mutex); + } + + tm = ut_clock(); + printf("Wall clock time for %ld rw test %ld milliseconds\n", + i, tm - oldtm); + + + + oldtm = ut_clock(); + + for (i = 0; i < 10000 * UNIV_DBC * UNIV_DBC; i++) { + + rw_lock_x_lock(&rw1); + rw_lock_x_unlock(&rw1); + + } + + tm = ut_clock(); + printf("Wall clock time for %ld rw x-lock-unlock %ld milliseconds\n", + i, tm - oldtm); + + + /* Test recursive x-locking */ + for (i = 0; i < 10000; i++) { + rw_lock_x_lock(&rw1); + } + + for (i = 0; i < 10000; i++) { + + rw_lock_x_unlock(&rw1); + } + + /* Test recursive s-locking */ + for (i = 0; i < 10000; i++) { + + rw_lock_s_lock(&rw1); + } + + for (i = 0; i < 10000; i++) { + + rw_lock_s_unlock(&rw1); + } + + rw_lock_s_lock(&rw1); + + ut_ad(1 == rw_lock_n_locked()); + + mem_print_info(); + + rw_lock_list_print_info(); + + thr2 = os_thread_create(thread_xrw, + NULL, + &id2); + + printf("Thread_xrw created, id %ld \n", id2); + + + thr1 = os_thread_create(thread_srw, + NULL, + &id1); + + printf("Thread_srw created, id %ld \n", id1); + + j = 0; + + for (i = 1; i < 10000000; i++) { + j += i; + } + + rw_lock_list_print_info(); + + sync_array_validate(sync_primary_wait_array); + + printf("Main thread releases now rw-lock!\n"); + + rw_lock_s_unlock(&rw1); + + os_thread_wait(thr2); + + os_thread_wait(thr1); + + sync_array_print_info(sync_primary_wait_array); +} + +/******************************************************************** +Start function for the competing s-threads in test6. The function tests +the behavior lock-coupling through 4 rw-locks. 
*/ + +ulint +thread_qs(volatile void* arg) +/*========================*/ +{ + ulint i, j, k, n; + + arg = arg; + + n = os_thread_get_curr_id(); + + printf("S-Thread %ld started, thread id %lu\n", n, + os_thread_get_curr_id()); + + for (k = 0; k < 1000 * UNIV_DBC; k++) { + + if (qprint) + printf("S-Thread %ld starts round %ld!\n", n, k); + + rw_lock_s_lock(&rw1); + + if (qprint) + printf("S-Thread %ld got lock 1 on round %ld!\n", n, k); + + + if (last_thr != n) { + switch_count++; + last_thr = n; + } + + j = 0; + + for (i = 1; i < 400; i++) { + j += i; + } + + rw_lock_s_lock(&rw2); + + if (qprint) + printf("S-Thread %ld got lock 2 on round %ld!\n", n, k); + + + rw_lock_s_unlock(&rw1); + + if (qprint) + printf("S-Thread %ld released lock 1 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + rw_lock_s_lock(&rw3); + + if (qprint) + printf("S-Thread %ld got lock 3 on round %ld!\n", n, k); + + + rw_lock_s_unlock(&rw2); + if (qprint) + printf("S-Thread %ld released lock 2 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + rw_lock_s_lock(&rw4); + + if (qprint) + printf("S-Thread %ld got lock 4 on round %ld!\n", n, k); + + + rw_lock_s_unlock(&rw3); + if (qprint) + printf("S-Thread %ld released lock 3 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + + rw_lock_s_unlock(&rw4); + if (qprint) + printf("S-Thread %ld released lock 4 on round %ld!\n", n, k); + + } + + printf("S-Thread %ld exits!\n", n); + + return(j); +} + +/******************************************************************** +Start function for the competing x-threads in test6. The function tests +the behavior lock-coupling through 4 rw-locks. */ + +ulint +thread_qx(volatile void* arg) +/*========================*/ +{ + ulint i, j, k, n; + + arg = arg; + + n = os_thread_get_curr_id(); + + printf("X-Thread %ld started, thread id %lu\n", n, + os_thread_get_curr_id()); + + for (k = 0; k < 1000 * UNIV_DBC; k++) { + + if (qprint) + printf("X-Thread %ld round %ld!\n", n, k); + + + rw_lock_x_lock(&rw1); + if (qprint) + printf("X-Thread %ld got lock 1 on round %ld!\n", n, k); + + + if (last_thr != n) { + switch_count++; + last_thr = n; + } + + j = 0; + + for (i = 1; i < 400; i++) { + j += i; + } + + rw_lock_x_lock(&rw2); + if (qprint) + printf("X-Thread %ld got lock 2 on round %ld!\n", n, k); + + + rw_lock_x_unlock(&rw1); + if (qprint) + printf("X-Thread %ld released lock 1 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + rw_lock_x_lock(&rw3); + if (qprint) + printf("X-Thread %ld got lock 3 on round %ld!\n", n, k); + + + rw_lock_x_unlock(&rw2); + if (qprint) + printf("X-Thread %ld released lock 2 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + rw_lock_x_lock(&rw4); + if (qprint) + printf("X-Thread %ld got lock 4 on round %ld!\n", n, k); + + rw_lock_x_unlock(&rw3); + if (qprint) + printf("X-Thread %ld released lock 3 on round %ld!\n", n, k); + + + for (i = 1; i < 400; i++) { + j += i; + } + + rw_lock_x_unlock(&rw4); + if (qprint) + printf("X-Thread %ld released lock 4 on round %ld!\n", n, k); + + } + + printf("X-Thread %ld exits!\n", n); + + return(j); +} + +/****************************************************************** +Test function for possible queuing problems with rw-locks. 
*/ + +void +test6(void) +/*=======*/ +{ + os_thread_t thr1, thr2, thr3, thr4, thr5; + os_thread_id_t id1, id2, id3, id4, id5; + ulint tm, oldtm; + ulint n1, n2, n3, n4, n5; + + printf("-------------------------------------------\n"); + printf( + "SYNC-TEST 6. Test of possible queuing problems with rw-locks.\n"); +/* + sync_array_print_info(sync_primary_wait_array); +*/ + + rw_lock_create(&rw2); + rw_lock_create(&rw3); + rw_lock_create(&rw4); + + switch_count = 0; + + + oldtm = ut_clock(); + + n1 = 1; + + thr1 = os_thread_create(thread_qs, + &n1, + &id1); + + os_thread_wait(thr1); + + + tm = ut_clock(); + printf("Wall clock time for single s-lock thread %ld milliseconds\n", + tm - oldtm); + + oldtm = ut_clock(); + + n1 = 1; + + thr1 = os_thread_create(thread_qx, + &n1, + &id1); + + os_thread_wait(thr1); + + + tm = ut_clock(); + printf("Wall clock time for single x-lock thread %ld milliseconds\n", + tm - oldtm); + + switch_count = 0; + + oldtm = ut_clock(); + + + n1 = 1; + thr1 = os_thread_create(thread_qx, + &n1, + &id1); + + n2 = 2; + thr2 = os_thread_create(thread_qs, + &n2, + &id2); + + n3 = 3; + thr3 = os_thread_create(thread_qx, + &n3, + &id3); + + + n4 = 4; + thr4 = os_thread_create(thread_qs, + &n4, + &id4); + + n5 = 5; + thr5 = os_thread_create(thread_qx, + &n5, + &id5); + + os_thread_wait(thr1); + + os_thread_wait(thr2); + + os_thread_wait(thr3); + + os_thread_wait(thr4); + + os_thread_wait(thr5); + + + tm = ut_clock(); + printf("Wall clock time for 5 threads %ld milliseconds\n", + tm - oldtm); + printf("at least %ld thread switches occurred\n", switch_count); + + printf( + "If this is not 2 x s-thread + 3 x x-thread time, possibly convoy!\n"); + + rw_lock_list_print_info(); + + sync_array_print_info(sync_primary_wait_array); + +} + +/******************************************************************** +Start function for thread in test7. */ +ulint +ip_thread(void* arg) +/*================*/ +{ + ulint i, j; + void* arg2; + ulint ret; + ulint tm, oldtm; + + arg2 = arg; + + printf("Thread started!\n"); + + oldtm = ut_clock(); + + ret = ip_mutex_enter(iph, 100000); + +/* ut_a(ret == SYNC_TIME_EXCEEDED); +*/ + tm = ut_clock(); + + printf("Wall clock time for wait failure %ld ms\n", tm - oldtm); + + ret = ip_mutex_enter(iph, SYNC_INFINITE_TIME); + + ut_a(ret == 0); + + printf("Thread owns now the ip mutex!\n"); + + j = 0; + + for (i = 1; i < 1000000; i++) { + j += i; + } + + printf("Thread releases now the ip mutex!\n"); + + ip_mutex_exit(iph); + + return(j); +} + +/********************************************************************* +Test for interprocess mutex. */ +void +test7(void) +/*=======*/ +{ + os_thread_t thr1; + os_thread_id_t id1; + ulint i, j; + ulint tm, oldtm; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 7. 
Test of ip mutex.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + ip_mutex_create(&ip_mutex, "IPMUTEX", &iph); + + oldtm = ut_clock(); + + for (i = 0; i < 100000 * UNIV_DBC; i++) { + + ip_mutex_enter(iph, SYNC_INFINITE_TIME); + ip_mutex_exit(iph); + } + + tm = ut_clock(); + printf("Wall clock time for %ld ip mutex lock-unlock %ld ms\n", + i, tm - oldtm); + + + ip_mutex_enter(iph, SYNC_INFINITE_TIME); + + thr1 = os_thread_create(ip_thread, + NULL, + &id1); + + printf("Thread created, id %ld \n", id1); + + + j = 0; + + for (i = 1; i < 100000000; i++) { + j += i; + } + + printf("Main thread releases now ip mutex!\n"); + + ip_mutex_exit(iph); + + os_thread_wait(thr1); + + ip_mutex_free(iph); +} + +/******************************************************************** +Start function for the competing threads in test8. The function tests +the behavior lock-coupling through 4 ip mutexes. */ + +ulint +thread_ipn(volatile void* arg) +/*========================*/ +{ + ulint i, j, k, n; + + n = *((ulint*)arg); + + printf("Thread %ld started!\n", n); + + for (k = 0; k < 2000 * UNIV_DBC; k++) { + + ip_mutex_enter(iph1, SYNC_INFINITE_TIME); + + if (last_thr != n) { + switch_count++; + last_thr = n; + } + + j = 0; + + for (i = 1; i < 400; i++) { + j += i; + } + + ip_mutex_enter(iph2, SYNC_INFINITE_TIME); + + ip_mutex_exit(iph1); + + for (i = 1; i < 400; i++) { + j += i; + } + ip_mutex_enter(iph3, SYNC_INFINITE_TIME); + + ip_mutex_exit(iph2); + + for (i = 1; i < 400; i++) { + j += i; + } + ip_mutex_enter(iph4, SYNC_INFINITE_TIME); + + ip_mutex_exit(iph3); + + for (i = 1; i < 400; i++) { + j += i; + } + + ip_mutex_exit(iph4); + } + + printf("Thread %ld exits!\n", n); + + return(j); +} + +/****************************************************************** +Test function for ip mutex. */ + +void +test8(void) +/*=======*/ +{ + os_thread_t thr1, thr2, thr3, thr4, thr5; + os_thread_id_t id1, id2, id3, id4, id5; + ulint tm, oldtm; + ulint n1, n2, n3, n4, n5; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 8. Test for ip mutex.\n"); + + + ip_mutex_create(&ip_mutex1, "jhfhk", &iph1); + ip_mutex_create(&ip_mutex2, "jggfg", &iph2); + ip_mutex_create(&ip_mutex3, "hfdx", &iph3); + ip_mutex_create(&ip_mutex4, "kjghg", &iph4); + + switch_count = 0; + + oldtm = ut_clock(); + + n1 = 1; + + thr1 = os_thread_create(thread_ipn, + &n1, + &id1); + + os_thread_wait(thr1); + + + tm = ut_clock(); + printf("Wall clock time for single thread %lu milliseconds\n", + tm - oldtm); + + switch_count = 0; + + oldtm = ut_clock(); + + n1 = 1; + thr1 = os_thread_create(thread_ipn, + &n1, + &id1); + n2 = 2; + thr2 = os_thread_create(thread_ipn, + &n2, + &id2); + n3 = 3; + thr3 = os_thread_create(thread_ipn, + &n3, + &id3); + n4 = 4; + thr4 = os_thread_create(thread_ipn, + &n4, + &id4); + n5 = 5; + thr5 = os_thread_create(thread_ipn, + &n5, + &id5); + + os_thread_wait(thr1); + os_thread_wait(thr2); + os_thread_wait(thr3); + os_thread_wait(thr4); + os_thread_wait(thr5); + + + tm = ut_clock(); + printf("Wall clock time for 5 threads %ld milliseconds\n", + tm - oldtm); + printf("%ld thread switches occurred\n", switch_count); + + printf("If this is not 5 x single thread time, possibly convoy!\n"); + + ip_mutex_free(iph1); + ip_mutex_free(iph2); + ip_mutex_free(iph3); + ip_mutex_free(iph4); +} + + +/******************************************************************** +Start function for s-lock thread in test9. 
*/ +ulint +thread_srw9(void* arg) +/*==================*/ +{ + void* arg2; + + arg2 = arg; + + printf("Thread_srw9 started!\n"); + + rw_lock_x_lock(&rw10); + + printf("Thread_srw9 has now x-lock on rw10, wait for mutex!\n"); + + mutex_enter(&mutex9); + + return(0); +} + +/******************************************************************** +Start function for x-lock thread in test9. */ +ulint +thread_xrw9(void* arg) +/*==================*/ +{ + void* arg2; + + arg2 = arg; + + printf("Thread_xrw started!\n"); + + mutex_enter(&mutex9); + printf("Thread_xrw9 has now mutex9, wait for rw9!\n"); + + rw_lock_x_lock(&rw9); + + return(0); +} + +void +test9(void) +/*=======*/ +{ + os_thread_t thr1, thr2; + os_thread_id_t id1, id2; + + printf("-------------------------------------------\n"); + printf("SYNC-TEST 9. Test of deadlock detection.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + rw_lock_create(&rw9); + rw_lock_create(&rw10); + mutex_create(&mutex9); + + rw_lock_s_lock(&rw9); + printf("Main thread has now s-lock on rw9\n"); + + thr2 = os_thread_create(thread_xrw9, + NULL, + &id2); + + printf("Thread_xrw9 created, id %ld \n", id2); + + os_thread_sleep(1000000); + + thr1 = os_thread_create(thread_srw9, + NULL, + &id1); + + printf("Thread_srw9 created, id %ld \n", id1); + + os_thread_sleep(1000000); + + sync_array_print_info(sync_primary_wait_array); + + printf("Now we should have a deadlock of 3 threads:\n"); + + rw_lock_s_lock(&rw10); +} + +void +test10(void) +/*=======*/ +{ + printf("-------------------------------------------\n"); + printf("SYNC-TEST 10. Test of deadlock detection on self-deadlock.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + mutex_create(&mutex9); + + printf("Now we should have a deadlock of this thread on mutex:\n"); + + mutex_enter(&mutex9); + mutex_enter(&mutex9); +} + +void +test11(void) +/*=======*/ +{ + printf("-------------------------------------------\n"); + printf("SYNC-TEST 11. Test of deadlock detection on self-deadlock.\n"); + + + printf("Main thread %ld starts!\n", + os_thread_get_curr_id()); + + rw_lock_create(&rw9); + + printf("Now we should have a deadlock of this thread on X-lock:\n"); + + rw_lock_x_lock(&rw9); + rw_lock_s_lock_gen(&rw9, 567); +} + + +/************************************************************************ +Main test function. */ + +void +main(void) +/*======*/ +{ + ulint tm, oldtm; + + sync_init(); + mem_init(); + + oldtm = ut_clock(); + + test1(); + + test2(); + + test3(); + + test4(); + + test5(); + + test6(); + + test7(); + + test8(); + + /* This test SHOULD result in assert on deadlock! */ +/* test9();*/ + + /* This test SHOULD result in assert on deadlock! */ +/* test10();*/ + + /* This test SHOULD result in assert on deadlock! */ +/* test11();*/ + + ut_ad(0 == mutex_n_reserved()); + ut_ad(0 == rw_lock_n_locked()); + ut_ad(sync_all_freed()); + + + ut_ad(mem_all_freed()); + + sync_close(); + + tm = ut_clock(); + printf("Wall clock time for test %ld milliseconds\n", tm - oldtm); + printf("System call count %lu\n", mutex_system_call_count); + printf("TESTS COMPLETED SUCCESSFULLY!\n"); +} + + |
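As a footnote to the sync0sync.c comments above: the spin-then-block acquisition they describe can be summarized in a short, self-contained sketch. This is only an illustration under stated assumptions, not the InnoDB implementation: C11 atomic_exchange stands in for the XCHG test-and-set, a pthread mutex/condition-variable pair stands in for the global wait array and its OS events, and the names toy_mutex_t, toy_mutex_enter/exit, and SPIN_ROUNDS are hypothetical.

/* Minimal sketch of spin-then-block mutex acquisition. NOT InnoDB code:
the pthread condvar below replaces the wait array, and SPIN_ROUNDS is an
arbitrary stand-in for SYNC_SPIN_ROUNDS. */

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define SPIN_ROUNDS	20

typedef struct {
	atomic_uint	lock_word;	/* 0 = free, 1 = locked */
	atomic_uint	waiters;	/* 1 if a thread may be sleeping on the lock */
	pthread_mutex_t	event_lock;	/* protects the condition variable below */
	pthread_cond_t	event;		/* signalled when the lock is released */
} toy_mutex_t;

static void toy_mutex_enter(toy_mutex_t* m)
{
	for (;;) {
		/* Spin reading the word: plain loads stay in the cache. */
		for (unsigned i = 0; i < SPIN_ROUNDS
				&& atomic_load(&m->lock_word) != 0; i++) {
		}

		/* Atomic test-and-set: zero means we got the lock first. */
		if (atomic_exchange(&m->lock_word, 1) == 0) {
			return;
		}

		/* Announce the waiter, then re-check the lock before
		sleeping, to close the lost-wakeup race discussed in the
		sync0sync.c comment. */
		pthread_mutex_lock(&m->event_lock);
		atomic_store(&m->waiters, 1);

		if (atomic_exchange(&m->lock_word, 1) == 0) {
			pthread_mutex_unlock(&m->event_lock);
			return;
		}

		pthread_cond_wait(&m->event, &m->event_lock);
		pthread_mutex_unlock(&m->event_lock);
	}
}

static void toy_mutex_exit(toy_mutex_t* m)
{
	atomic_store(&m->lock_word, 0);

	if (atomic_load(&m->waiters) != 0) {
		pthread_mutex_lock(&m->event_lock);
		/* Reset waiters first, then signal: the same ordering that
		LEMMA 1 relies on. */
		atomic_store(&m->waiters, 0);
		pthread_cond_broadcast(&m->event);
		pthread_mutex_unlock(&m->event_lock);
	}
}

int main(void)
{
	static toy_mutex_t m = {0, 0,
			PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER};

	toy_mutex_enter(&m);
	toy_mutex_exit(&m);
	puts("toy mutex enter/exit ok");

	return 0;
}

Unlike this per-mutex condvar, the real code multiplexes all mutexes and rw-locks over one global wait array, so a small fixed number of OS events can serve any number of latches.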