Diffstat (limited to 'storage/maria/trnman.c')
-rw-r--r--  storage/maria/trnman.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/storage/maria/trnman.c b/storage/maria/trnman.c
index 09c8f9a264f..f523da22291 100644
--- a/storage/maria/trnman.c
+++ b/storage/maria/trnman.c
@@ -261,6 +261,7 @@ TRN *trnman_new_trn(WT_THD *wt)
{
int res;
TRN *trn;
+ union { TRN *trn; void *v; } tmp;
DBUG_ENTER("trnman_new_trn");
/*
@@ -276,19 +277,19 @@ TRN *trnman_new_trn(WT_THD *wt)
pthread_mutex_lock(&LOCK_trn_list);
/* Allocating a new TRN structure */
- trn= pool;
+ tmp.trn= pool;
/*
Popping an unused TRN from the pool
(ABA isn't possible, we're behind a mutex)
*/
my_atomic_rwlock_wrlock(&LOCK_pool);
- while (trn && !my_atomic_casptr((void **)&pool, (void **)&trn,
- (void *)trn->next))
+ while (tmp.trn && !my_atomic_casptr((void **)&pool, &tmp.v,
+ (void *)tmp.trn->next))
/* no-op */;
my_atomic_rwlock_wrunlock(&LOCK_pool);
/* Nothing in the pool ? Allocate a new one */
- if (!trn)
+ if (!(trn= tmp.trn))
{
/*
trn should be completely initialized at create time to allow
@@ -359,7 +360,7 @@ TRN *trnman_new_trn(WT_THD *wt)
return 0;
}
- DBUG_PRINT("exit", ("trn: x%lx trid: 0x%lu",
+ DBUG_PRINT("exit", ("trn: 0x%lx trid: 0x%lu",
(ulong) trn, (ulong) trn->trid));
DBUG_RETURN(trn);
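[editor's note] For readers outside the Maria tree: the pool-pop change above replaces the (void **)&trn cast with a TRN*/void* union. my_atomic_casptr() wants a real void ** for its compare argument (the patch passes &tmp.v), and casting a TRN ** to void ** can run afoul of strict-aliasing/type-punning rules, which is presumably what the union avoids. Below is a minimal, self-contained sketch of the same pattern written against C11 atomics instead of the my_atomic wrappers; node_t, cas_ptr(), pool_push() and pool_pop() are illustrative names, not MariaDB code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct node { struct node *next; } node_t;

/* shared free-list head, kept as void* so the CAS helper below has the
   same shape as the my_atomic_casptr() call in the patch */
static _Atomic(void *) pool;

/* on failure *expected is refreshed with the current head and false is
   returned -- the contract the retry loop in trnman_new_trn() relies on */
static bool cas_ptr(_Atomic(void *) *slot, void **expected, void *newval)
{
  return atomic_compare_exchange_weak(slot, expected, newval);
}

static void pool_push(node_t *n)
{
  union { node_t *n; void *v; } tmp;
  tmp.v= atomic_load(&pool);
  do
    n->next= tmp.n;
  while (!cas_ptr(&pool, &tmp.v, n));
}

/* like the original, this pop is only ABA-safe if callers serialize it
   (trnman_new_trn() runs it while holding LOCK_trn_list) */
static node_t *pool_pop(void)
{
  /*
    the union lets us hand a genuine void** to cas_ptr() instead of
    casting node_t** to void**, mirroring the fix in the hunk above
  */
  union { node_t *n; void *v; } tmp;
  tmp.v= atomic_load(&pool);
  while (tmp.n && !cas_ptr(&pool, &tmp.v, tmp.n->next))
    /* no-op: tmp.v was refreshed with the current head, retry */;
  return tmp.n;
}

int main(void)
{
  node_t a, b;
  pool_push(&a);
  pool_push(&b);
  return (pool_pop() == &b && pool_pop() == &a) ? 0 : 1;
}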
@@ -385,11 +386,12 @@ TRN *trnman_new_trn(WT_THD *wt)
my_bool trnman_end_trn(TRN *trn, my_bool commit)
{
int res= 1;
+ uint16 cached_short_id= trn->short_id; /* we have to cache it, see below */
TRN *free_me= 0;
LF_PINS *pins= trn->pins;
DBUG_ENTER("trnman_end_trn");
+ DBUG_PRINT("enter", ("trn=0x%lx commit=%d", (ulong) trn, commit));
- DBUG_ASSERT(trn->rec_lsn == 0);
/* if a rollback, all UNDO records should have been executed */
DBUG_ASSERT(commit || trn->undo_lsn == 0);
DBUG_PRINT("info", ("pthread_mutex_lock LOCK_trn_list"));
@@ -454,11 +456,17 @@ my_bool trnman_end_trn(TRN *trn, my_bool commit)
res= -1;
trnman_active_transactions--;
+ DBUG_PRINT("info", ("pthread_mutex_unlock LOCK_trn_list"));
pthread_mutex_unlock(&LOCK_trn_list);
- /* the rest is done outside of a critical section */
+ /*
+ the rest is done outside of a critical section
+
+ note that we don't own trn anymore, it may be in a shared list now.
+ Thus, we cannot dereference it, and must use cached_short_id below.
+ */
my_atomic_rwlock_rdlock(&LOCK_short_trid_to_trn);
- my_atomic_storeptr((void **)&short_trid_to_active_trn[trn->short_id], 0);
+ my_atomic_storeptr((void **)&short_trid_to_active_trn[cached_short_id], 0);
my_atomic_rwlock_rdunlock(&LOCK_short_trid_to_trn);
/*
@@ -503,7 +511,6 @@ void trnman_free_trn(TRN *trn)
*/
union { TRN *trn; void *v; } tmp;
-
pthread_mutex_lock(&trn->state_lock);
trn->short_id= 0;
pthread_mutex_unlock(&trn->state_lock);
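[editor's note] The cached_short_id change in trnman_end_trn() above illustrates a general hand-off rule: once LOCK_trn_list is released the TRN may already sit on a shared list and be reused, so anything the ending thread still needs must be copied out while it still owns the structure. A small stand-alone sketch of that pattern follows; session_t, end_session() and id_to_session[] are illustrative names, not MariaDB code.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct session { uint16_t short_id; struct session *next; } session_t;

static pthread_mutex_t list_lock= PTHREAD_MUTEX_INITIALIZER;
static session_t *free_list;                     /* other threads pop from here */
static _Atomic(session_t *) id_to_session[1 << 16];

static void end_session(session_t *s)
{
  uint16_t cached_id= s->short_id;   /* copy while this thread still owns s */

  pthread_mutex_lock(&list_lock);
  s->next= free_list;                /* from here on, s belongs to the list */
  free_list= s;
  pthread_mutex_unlock(&list_lock);

  /*
    s may already have been popped and reused by another thread, so the
    lookup slot is cleared through the cached copy, never through s itself
  */
  atomic_store(&id_to_session[cached_id], NULL);
}

int main(void)
{
  static session_t s= { .short_id= 42 };
  atomic_store(&id_to_session[42], &s);
  end_session(&s);
  return atomic_load(&id_to_session[42]) == NULL ? 0 : 1;
}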