author     unknown <jani@a193-229-222-105.elisa-laajakaista.fi>  2005-09-30 13:56:32 +0300
committer  unknown <jani@a193-229-222-105.elisa-laajakaista.fi>  2005-09-30 13:56:32 +0300
commit     3bb53a7cea355fa14a7538a13e542a177f5cbabd (patch)
tree       50818821cc1e7a521e4cf0b23bf9a56ef17173ad /mysys
parent     c8e0432dbdf5a41677f2c64b5a596badddf717b2 (diff)
parent     06e08e047a2bc9efc2432f394e0cfa26a0d494d7 (diff)
download   mariadb-git-3bb53a7cea355fa14a7538a13e542a177f5cbabd.tar.gz
Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-5.0
into a193-229-222-105.elisa-laajakaista.fi:/home/my/bk/mysql-5.0-icc

myisam/mi_open.c: Auto merged
myisam/myisamchk.c: Auto merged
myisam/myisamdef.h: Auto merged
myisam/myisampack.c: Auto merged
sql/sql_parse.cc: Auto merged
Diffstat (limited to 'mysys')
-rw-r--r--  mysys/mf_keycache.c  72
1 file changed, 70 insertions(+), 2 deletions(-)
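The patch below wraps the key cache's wait-queue handling in #ifdef THREAD so that mf_keycache.c also builds without thread support: where a threaded build suspends on a condition variable until another thread signals it, the non-threaded build simply asserts that the waited-for condition already holds. The fragment below is only a minimal sketch of that pattern under hypothetical names (toy_block, toy_wait_for_readers); it is not code from mf_keycache.c.

#include <assert.h>
#include <pthread.h>

/* Hypothetical stand-in for a cache block (think BLOCK_LINK). */
struct toy_block
{
  int pending_readers;            /* like block->hash_link->requests   */
  pthread_cond_t readers_gone;    /* signalled by the last reader      */
};

static void toy_wait_for_readers(struct toy_block *block,
                                 pthread_mutex_t *cache_lock)
{
#ifdef THREAD
  /* Threaded build: suspend until the last reader wakes us, like the
     keycache_pthread_cond_wait() loops guarded in this patch.         */
  while (block->pending_readers)
    pthread_cond_wait(&block->readers_gone, cache_lock);
#else
  /* Non-threaded build: no other thread can hold a request, so the
     wait collapses into an assertion, as the patch does.              */
  (void) cache_lock;
  assert(block->pending_readers == 0);
#endif
}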
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index a86b6a2f1f4..69410e9faaa 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -161,10 +161,12 @@ KEY_CACHE *dflt_key_cache= &dflt_key_cache_var;
#define FLUSH_CACHE 2000 /* sort this many blocks at once */
static int flush_all_key_blocks(KEY_CACHE *keycache);
+#ifdef THREAD
static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
struct st_my_thread_var *thread);
static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
struct st_my_thread_var *thread);
+#endif
static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block);
static void test_key_cache(KEY_CACHE *keycache,
const char *where, my_bool lock);
@@ -215,6 +217,7 @@ static void keycache_debug_print _VARARGS((const char *fmt,...));
#endif /* defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) */
#if defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF)
+#ifdef THREAD
static long keycache_thread_id;
#define KEYCACHE_THREAD_TRACE(l) \
KEYCACHE_DBUG_PRINT(l,("|thread %ld",keycache_thread_id))
@@ -226,6 +229,11 @@ static long keycache_thread_id;
#define KEYCACHE_THREAD_TRACE_END(l) \
KEYCACHE_DBUG_PRINT(l,("]thread %ld",keycache_thread_id))
+#else /* THREAD */
+#define KEYCACHE_THREAD_TRACE(l) KEYCACHE_DBUG_PRINT(l,(""))
+#define KEYCACHE_THREAD_TRACE_BEGIN(l) KEYCACHE_DBUG_PRINT(l,(""))
+#define KEYCACHE_THREAD_TRACE_END(l) KEYCACHE_DBUG_PRINT(l,(""))
+#endif /* THREAD */
#else
#define KEYCACHE_THREAD_TRACE_BEGIN(l)
#define KEYCACHE_THREAD_TRACE_END(l)
@@ -492,6 +500,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
keycache_pthread_mutex_lock(&keycache->cache_lock);
+#ifdef THREAD
wqueue= &keycache->resize_queue;
thread= my_thread_var;
link_into_queue(wqueue, thread);
@@ -500,6 +509,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
{
keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
}
+#endif
keycache->resize_in_flush= 1;
if (flush_all_key_blocks(keycache))
@@ -512,12 +522,16 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
}
keycache->resize_in_flush= 0;
keycache->can_be_used= 0;
+#ifdef THREAD
while (keycache->cnt_for_resize_op)
{
KEYCACHE_DBUG_PRINT("resize_key_cache: wait",
("suspend thread %ld", thread->id));
keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
}
+#else
+ KEYCACHE_DBUG_ASSERT(keycache->cnt_for_resize_op == 0);
+#endif
end_key_cache(keycache, 0); /* Don't free mutex */
/* The following will work even if use_mem is 0 */
@@ -525,6 +539,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
division_limit, age_threshold);
finish:
+#ifdef THREAD
unlink_from_queue(wqueue, thread);
/* Signal for the next resize request to proceed if any */
if (wqueue->last_thread)
@@ -533,6 +548,7 @@ finish:
("thread %ld", wqueue->last_thread->next->id));
keycache_pthread_cond_signal(&wqueue->last_thread->next->suspend);
}
+#endif
keycache_pthread_mutex_unlock(&keycache->cache_lock);
return blocks;
}
@@ -553,6 +569,7 @@ static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
*/
static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
{
+#ifdef THREAD
struct st_my_thread_var *last_thread;
if (!--keycache->cnt_for_resize_op &&
(last_thread= keycache->resize_queue.last_thread))
@@ -561,6 +578,9 @@ static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
("thread %ld", last_thread->next->id));
keycache_pthread_cond_signal(&last_thread->next->suspend);
}
+#else
+ keycache->cnt_for_resize_op--;
+#endif
}
/*
@@ -650,6 +670,7 @@ void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
} /* end_key_cache */
+#ifdef THREAD
/*
Link a thread into double-linked queue of waiting threads.
@@ -786,6 +807,7 @@ static void release_queue(KEYCACHE_WQUEUE *wqueue)
while (thread != last);
wqueue->last_thread= NULL;
}
+#endif
/*
@@ -893,6 +915,7 @@ static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
BLOCK_LINK **pins;
KEYCACHE_DBUG_ASSERT(! (block->hash_link && block->hash_link->requests));
+#ifdef THREAD
if (!hot && keycache->waiting_for_block.last_thread)
{
/* Signal that in the LRU warm sub-chain an available block has appeared */
@@ -929,6 +952,10 @@ static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
#endif
return;
}
+#else /* THREAD */
+ KEYCACHE_DBUG_ASSERT(! (!hot && keycache->waiting_for_block.last_thread));
+ /* Condition not transformed using DeMorgan, to keep the text identical */
+#endif /* THREAD */
pins= hot ? &keycache->used_ins : &keycache->used_last;
ins= *pins;
if (ins)
@@ -1101,6 +1128,7 @@ static inline void remove_reader(BLOCK_LINK *block)
static inline void wait_for_readers(KEY_CACHE *keycache, BLOCK_LINK *block)
{
+#ifdef THREAD
struct st_my_thread_var *thread= my_thread_var;
while (block->hash_link->requests)
{
@@ -1111,6 +1139,9 @@ static inline void wait_for_readers(KEY_CACHE *keycache, BLOCK_LINK *block)
keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
block->condvar= NULL;
}
+#else
+ KEYCACHE_DBUG_ASSERT(block->hash_link->requests == 0);
+#endif
}
@@ -1140,6 +1171,7 @@ static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
if ((*hash_link->prev= hash_link->next))
hash_link->next->prev= hash_link->prev;
hash_link->block= NULL;
+#ifdef THREAD
if (keycache->waiting_for_hash_link.last_thread)
{
/* Signal that a free hash link has appeared */
@@ -1175,6 +1207,9 @@ static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
hash_link);
return;
}
+#else /* THREAD */
+ KEYCACHE_DBUG_ASSERT(! (keycache->waiting_for_hash_link.last_thread));
+#endif /* THREAD */
hash_link->next= keycache->free_hash_list;
keycache->free_hash_list= hash_link;
}
@@ -1240,6 +1275,7 @@ restart:
}
else
{
+#ifdef THREAD
/* Wait for a free hash link */
struct st_my_thread_var *thread= my_thread_var;
KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
@@ -1252,6 +1288,9 @@ restart:
keycache_pthread_cond_wait(&thread->suspend,
&keycache->cache_lock);
thread->opt_info= NULL;
+#else
+ KEYCACHE_DBUG_ASSERT(0);
+#endif
goto restart;
}
hash_link->file= file;
@@ -1363,6 +1402,7 @@ restart:
/* Wait until the page is flushed on disk */
hash_link->requests--;
{
+#ifdef THREAD
struct st_my_thread_var *thread= my_thread_var;
add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
do
@@ -1373,6 +1413,16 @@ restart:
&keycache->cache_lock);
}
while(thread->next);
+#else
+ KEYCACHE_DBUG_ASSERT(0);
+ /*
+ Given the use of "resize_in_flush", it seems impossible
+ that this whole branch is ever entered in single-threaded case
+ because "(wrmode && keycache->resize_in_flush)" cannot be true.
+ TODO: Check this, and then put the whole branch into the
+ "#ifdef THREAD" guard.
+ */
+#endif
}
/* Invalidate page in the block if it has not been done yet */
if (block->status)
@@ -1401,6 +1451,7 @@ restart:
KEYCACHE_DBUG_PRINT("find_key_block",
("request waiting for old page to be saved"));
{
+#ifdef THREAD
struct st_my_thread_var *thread= my_thread_var;
/* Put the request into the queue of those waiting for the old page */
add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
@@ -1413,6 +1464,10 @@ restart:
&keycache->cache_lock);
}
while(thread->next);
+#else
+ KEYCACHE_DBUG_ASSERT(0);
+ /* No parallel requests in single-threaded case */
+#endif
}
KEYCACHE_DBUG_PRINT("find_key_block",
("request for old page resubmitted"));
@@ -1471,6 +1526,7 @@ restart:
all of them must get the same block
*/
+#ifdef THREAD
if (! keycache->used_last)
{
struct st_my_thread_var *thread= my_thread_var;
@@ -1486,6 +1542,9 @@ restart:
while (thread->next);
thread->opt_info= NULL;
}
+#else
+ KEYCACHE_DBUG_ASSERT(keycache->used_last);
+#endif
block= hash_link->block;
if (! block)
{
@@ -1674,6 +1733,7 @@ static void read_block(KEY_CACHE *keycache,
KEYCACHE_DBUG_PRINT("read_block",
("secondary request waiting for new page to be read"));
{
+#ifdef THREAD
struct st_my_thread_var *thread= my_thread_var;
/* Put the request into a queue and wait until it can be processed */
add_to_queue(&block->wqueue[COND_FOR_REQUESTED], thread);
@@ -1685,6 +1745,10 @@ static void read_block(KEY_CACHE *keycache,
&keycache->cache_lock);
}
while (thread->next);
+#else
+ KEYCACHE_DBUG_ASSERT(0);
+ /* No parallel requests in single-threaded case */
+#endif
}
KEYCACHE_DBUG_PRINT("read_block",
("secondary request: new page in cache"));
@@ -1822,7 +1886,7 @@ byte *key_cache_read(KEY_CACHE *keycache,
#ifndef THREAD
/* This is only true if we were able to read everything in one block */
if (return_buffer)
- return (block->buffer);
+ DBUG_RETURN(block->buffer);
#endif
buff+= read_length;
filepos+= read_length+offset;
@@ -2398,6 +2462,7 @@ restart:
#endif
block= first_in_switch;
{
+#ifdef THREAD
struct st_my_thread_var *thread= my_thread_var;
add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
do
@@ -2408,6 +2473,10 @@ restart:
&keycache->cache_lock);
}
while (thread->next);
+#else
+ KEYCACHE_DBUG_ASSERT(0);
+ /* No parallel requests in single-threaded case */
+#endif
}
#if defined(KEYCACHE_DEBUG)
cnt++;
@@ -2574,7 +2643,6 @@ static void test_key_cache(KEY_CACHE *keycache __attribute__((unused)),
static void keycache_dump(KEY_CACHE *keycache)
{
FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w");
- struct st_my_thread_var *thread_var= my_thread_var;
struct st_my_thread_var *last;
struct st_my_thread_var *thread;
BLOCK_LINK *block;
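Besides the THREAD guards, the hunk in key_cache_read() replaces a bare return with DBUG_RETURN() on the non-threaded fast path. In MySQL's dbug trace facility, a function that opens a frame with DBUG_ENTER() has to leave it through DBUG_RETURN()/DBUG_VOID_RETURN so the trace call stack stays balanced; returning directly leaves the frame dangling in debug builds. A minimal, hypothetical illustration of that pairing follows (toy_read is not a real MySQL function; the include is just the usual mysys convention, not something introduced by this patch):

#include <my_global.h>   /* MySQL common header; in mysys it makes
                            DBUG_ENTER/DBUG_RETURN available           */

/* Hypothetical helper showing why the patch uses DBUG_RETURN here. */
static char *toy_read(char *buffer, int done_in_one_block)
{
  DBUG_ENTER("toy_read");
  if (done_in_one_block)
    DBUG_RETURN(buffer);          /* pops the dbug frame before returning */
  /* A plain "return buffer;" would skip that pop and unbalance the
     dbug stack, which is what the key_cache_read() change avoids.       */
  DBUG_RETURN(NULL);
}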