author     unknown <monty@narttu.mysql.fi>  2003-10-07 21:41:07 +0300
committer  unknown <monty@narttu.mysql.fi>  2003-10-07 21:41:07 +0300
commit     118d532134a1700bdb7ba29ce41712994f023692 (patch)
tree       a56526a95032e3fe7e1c0fc41e71ad9bef89d5d8 /sql
parent     d1485aad0eb79559902b1af26502b7cc53f8e95a (diff)
parent     60e4f7d5066d549f782b975fdf695dbdab074f1b (diff)
download   mariadb-git-118d532134a1700bdb7ba29ce41712994f023692.tar.gz
merge

sql/mysqld.cc:
  Auto merged
sql/sql_select.cc:
  Auto merged
innobase/row/row0mysql.c:
  merge (no changes)
innobase/row/row0sel.c:
  merge (no changes)
sql/item_subselect.cc:
  Use Sanja's patch instead
Diffstat (limited to 'sql')
-rw-r--r--  sql/des_key_file.cc      8
-rw-r--r--  sql/item_func.cc       243
-rw-r--r--  sql/item_func.h          5
-rw-r--r--  sql/item_strfunc.cc     34
-rw-r--r--  sql/item_subselect.cc    1
-rw-r--r--  sql/log.cc               8
-rw-r--r--  sql/log_event.cc        48
-rw-r--r--  sql/mysql_priv.h         4
-rw-r--r--  sql/mysqld.cc           12
-rw-r--r--  sql/repl_failsafe.cc    14
-rw-r--r--  sql/slave.cc            34
-rw-r--r--  sql/slave.h              7
-rw-r--r--  sql/sql_base.cc         14
-rw-r--r--  sql/sql_class.h          5
-rw-r--r--  sql/sql_load.cc         44
-rw-r--r--  sql/sql_parse.cc         7
-rw-r--r--  sql/sql_select.cc       71
17 files changed, 336 insertions, 223 deletions
diff --git a/sql/des_key_file.cc b/sql/des_key_file.cc
index 5b25819b657..c6b4c5f2c34 100644
--- a/sql/des_key_file.cc
+++ b/sql/des_key_file.cc
@@ -79,16 +79,16 @@ load_des_key_file(const char *file_name)
if (start != end)
{
- des_cblock ivec;
+ DES_cblock ivec;
bzero((char*) &ivec,sizeof(ivec));
// We make good 24-byte (168 bit) key from given plaintext key with MD5
EVP_BytesToKey(EVP_des_ede3_cbc(),EVP_md5(),NULL,
(uchar *) start, (int) (end-start),1,
(uchar *) &keyblock,
ivec);
- des_set_key_unchecked(&keyblock.key1,des_keyschedule[(int)offset].ks1);
- des_set_key_unchecked(&keyblock.key2,des_keyschedule[(int)offset].ks2);
- des_set_key_unchecked(&keyblock.key3,des_keyschedule[(int)offset].ks3);
+ DES_set_key_unchecked(&keyblock.key1,&(des_keyschedule[(int)offset].ks1));
+ DES_set_key_unchecked(&keyblock.key2,&(des_keyschedule[(int)offset].ks2));
+ DES_set_key_unchecked(&keyblock.key3,&(des_keyschedule[(int)offset].ks3));
if (des_default_key == 15)
des_default_key= (uint) offset; // use first as def.
}
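
The des_* to DES_* renames above (and the matching changes in item_strfunc.cc and mysql_priv.h further down) move the code to the OpenSSL 0.9.7 spelling of the DES API, where the key schedule is passed by address. Below is a minimal standalone sketch of the new-style call, assuming an OpenSSL 0.9.7-or-later <openssl/des.h>; it is illustrative only and not part of this commit.

#include <openssl/des.h>
#include <cstring>

int main()
{
  DES_cblock key;               // 8-byte key block (des_cblock in 0.9.6)
  DES_key_schedule ks;          // expanded schedule (des_key_schedule in 0.9.6)

  std::memset(&key, 0, sizeof(key));
  // 0.9.6 style: des_set_key_unchecked(&key, ks);
  // 0.9.7 style: upper-case type names, schedule passed by address.
  DES_set_key_unchecked(&key, &ks);
  return 0;
}
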
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 0f9ee512be1..bc7c95d8929 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2105,13 +2105,13 @@ bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables,
if (Item_func::fix_fields(thd, tables, ref) ||
!(entry= get_variable(&thd->user_vars, name, 1)))
return 1;
- entry->type= cached_result_type;
/*
Remember the last query which updated it, this way a query can later know
if this variable is a constant item in the query (it is if update_query_id
is different from query_id).
*/
- entry->update_query_id=thd->query_id;
+ entry->update_query_id= thd->query_id;
+ cached_result_type= args[0]->result_type();
return 0;
}
@@ -2122,10 +2122,10 @@ Item_func_set_user_var::fix_length_and_dec()
maybe_null=args[0]->maybe_null;
max_length=args[0]->max_length;
decimals=args[0]->decimals;
- cached_result_type=args[0]->result_type();
}
-void Item_func_set_user_var::update_hash(void *ptr, uint length,
+
+bool Item_func_set_user_var::update_hash(void *ptr, uint length,
Item_result type,
CHARSET_INFO *cs,
Derivation dv)
@@ -2141,6 +2141,8 @@ void Item_func_set_user_var::update_hash(void *ptr, uint length,
}
else
{
+ if (type == STRING_RESULT)
+ length++; // Store strings with end \0
if (length <= extra_size)
{
/* Save value in value struct */
@@ -2165,35 +2167,135 @@ void Item_func_set_user_var::update_hash(void *ptr, uint length,
goto err;
}
}
+ if (type == STRING_RESULT)
+ {
+ length--; // Fix length change above
+ entry->value[length]= 0; // Store end \0
+ }
memcpy(entry->value,ptr,length);
entry->length= length;
entry->type=type;
entry->collation.set(cs, dv);
}
- return;
+ return 0;
err:
current_thd->fatal_error(); // Probably end of memory
- null_value=1;
- return;
+ null_value= 1;
+ return 1;
+}
+
+
+/* Get the value of a variable as a double */
+
+double user_var_entry::val(my_bool *null_value)
+{
+ if ((*null_value= (value == 0)))
+ return 0.0;
+
+ switch (type) {
+ case REAL_RESULT:
+ return *(double*) value;
+ case INT_RESULT:
+ return (double) *(longlong*) value;
+ case STRING_RESULT:
+ return atof(value); // This is null terminated
+ }
+ return 0.0; // Impossible
+}
+
+
+/* Get the value of a variable as an integer */
+
+longlong user_var_entry::val_int(my_bool *null_value)
+{
+ if ((*null_value= (value == 0)))
+ return LL(0);
+
+ switch (type) {
+ case REAL_RESULT:
+ return (longlong) *(double*) value;
+ case INT_RESULT:
+ return *(longlong*) value;
+ case STRING_RESULT:
+ return strtoull(value,NULL,10); // String is null terminated
+ }
+ return LL(0); // Impossible
}
+/* Get the value of a variable as a string */
+
+String *user_var_entry::val_str(my_bool *null_value, String *str,
+ uint decimals)
+{
+ if ((*null_value= (value == 0)))
+ return (String*) 0;
+
+ switch (type) {
+ case REAL_RESULT:
+ str->set(*(double*) value, decimals, &my_charset_bin);
+ break;
+ case INT_RESULT:
+ str->set(*(longlong*) value, &my_charset_bin);
+ break;
+ case STRING_RESULT:
+ if (str->copy(value, length, collation.collation))
+ str= 0; // EOM error
+ }
+ return(str);
+}
+
+
+/*
+  This function is invoked on SET @variable or @variable:= expression.
+
+ SYNOPSIS
+ Item_func_set_user_var::update()
+
+ NOTES
+    We have to store the value of the expression in the variable, independent
+    of which val* method the caller later uses
+
+ RETURN
+ 0 Ok
+ 1 EOM Error
+
+*/
+
bool
Item_func_set_user_var::update()
{
+ bool res;
+ DBUG_ENTER("Item_func_set_user_var::update");
+ LINT_INIT(res);
+
switch (cached_result_type) {
case REAL_RESULT:
- (void) val();
+ {
+ double value=args[0]->val();
+ res= update_hash((void*) &value,sizeof(value), REAL_RESULT,
+ &my_charset_bin, DERIVATION_NONE);
break;
+ }
case INT_RESULT:
- (void) val_int();
+ {
+ longlong value=args[0]->val_int();
+ res= update_hash((void*) &value, sizeof(longlong), INT_RESULT,
+ &my_charset_bin, DERIVATION_NONE);
break;
+ }
+ break;
case STRING_RESULT:
{
- char buffer[MAX_FIELD_WIDTH];
- String tmp(buffer,sizeof(buffer),&my_charset_bin);
- (void) val_str(&tmp);
+ String *tmp;
+ tmp=args[0]->val_str(&value);
+ if (!tmp) // Null value
+ res= update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin,
+ DERIVATION_NONE);
+ else
+ res= update_hash((void*) tmp->ptr(), tmp->length(), STRING_RESULT,
+ tmp->charset(), args[0]->collation.derivation);
break;
}
case ROW_RESULT:
@@ -2202,44 +2304,32 @@ Item_func_set_user_var::update()
DBUG_ASSERT(0);
break;
}
- return current_thd->is_fatal_error;
+ DBUG_RETURN(res);
}
-double
-Item_func_set_user_var::val()
+double Item_func_set_user_var::val()
{
- double value=args[0]->val();
- update_hash((void*) &value,sizeof(value), REAL_RESULT,
- &my_charset_bin, DERIVATION_NONE);
- return value;
+ update(); // Store expression
+ return entry->val(&null_value);
}
-longlong
-Item_func_set_user_var::val_int()
+longlong Item_func_set_user_var::val_int()
{
- longlong value=args[0]->val_int();
- update_hash((void*) &value, sizeof(longlong), INT_RESULT,
- &my_charset_bin, DERIVATION_NONE);
- return value;
+ update(); // Store expression
+ return entry->val_int(&null_value);
}
-String *
-Item_func_set_user_var::val_str(String *str)
+String *Item_func_set_user_var::val_str(String *str)
{
- String *res=args[0]->val_str(str);
- if (!res) // Null value
- update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin, DERIVATION_NONE);
- else
- update_hash((void*) res->ptr(), res->length(), STRING_RESULT,
- res->charset(), args[0]->collation.derivation);
- return res;
+ update(); // Store expression
+ return entry->val_str(&null_value, str, decimals);
}
void Item_func_set_user_var::print(String *str)
{
- str->append('(');
+ str->append("(@@",3);
str->append(name.str,name.length);
str->append(":=",2);
args[0]->print(str);
@@ -2247,89 +2337,29 @@ void Item_func_set_user_var::print(String *str)
}
-user_var_entry *Item_func_get_user_var::get_entry()
-{
- if (!var_entry || ! var_entry->value)
- {
- null_value=1;
- return 0;
- }
- null_value=0;
- return var_entry;
-}
-
-
String *
Item_func_get_user_var::val_str(String *str)
{
- user_var_entry *entry=get_entry();
- if (!entry)
- return NULL;
- switch (entry->type) {
- case REAL_RESULT:
- str->set(*(double*) entry->value,decimals, &my_charset_bin);
- break;
- case INT_RESULT:
- str->set(*(longlong*) entry->value, &my_charset_bin);
- break;
- case STRING_RESULT:
- if (str->copy(entry->value, entry->length, entry->collation.collation))
- {
- null_value=1;
- return NULL;
- }
- break;
- case ROW_RESULT:
- default:
- // This case should never be choosen
- DBUG_ASSERT(0);
- break;
- }
- return str;
+ DBUG_ENTER("Item_func_get_user_var::val_str");
+ if (!var_entry)
+ return (String*) 0; // No such variable
+ DBUG_RETURN(var_entry->val_str(&null_value, str, decimals));
}
double Item_func_get_user_var::val()
{
- user_var_entry *entry=get_entry();
- if (!entry)
- return 0.0;
- switch (entry->type) {
- case REAL_RESULT:
- return *(double*) entry->value;
- case INT_RESULT:
- return (double) *(longlong*) entry->value;
- case STRING_RESULT:
- return atof(entry->value); // This is null terminated
- case ROW_RESULT:
- default:
- // This case should never be choosen
- DBUG_ASSERT(0);
- return 0;
- }
- return 0.0; // Impossible
+ if (!var_entry)
+ return 0.0; // No such variable
+ return (var_entry->val(&null_value));
}
longlong Item_func_get_user_var::val_int()
{
- user_var_entry *entry=get_entry();
- if (!entry)
- return LL(0);
- switch (entry->type) {
- case REAL_RESULT:
- return (longlong) *(double*) entry->value;
- case INT_RESULT:
- return *(longlong*) entry->value;
- case STRING_RESULT:
- return strtoull(entry->value,NULL,10); // String is null terminated
- case ROW_RESULT:
- default:
- // This case should never be choosen
- DBUG_ASSERT(0);
- return 0;
- }
- return LL(0); // Impossible
+ if (!var_entry)
+ return LL(0); // No such variable
+ return (var_entry->val_int(&null_value));
}
@@ -2348,7 +2378,8 @@ void Item_func_get_user_var::fix_length_and_dec()
decimals=NOT_FIXED_DEC;
max_length=MAX_BLOB_WIDTH;
- var_entry= get_variable(&thd->user_vars, name, 0);
+ if (!(var_entry= get_variable(&thd->user_vars, name, 0)))
+ null_value= 1;
if (!(opt_bin_log && is_update_query(thd->lex.sql_command)))
return;
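
One detail worth noting in the item_func.cc hunks above: STRING_RESULT values are now stored with a trailing '\0' (the length++/length-- adjustment in update_hash()), which is what lets user_var_entry::val() and val_int() hand the stored buffer directly to atof()/strtoull(). A simplified sketch of that access pattern follows, using a hypothetical stand-in struct rather than the real user_var_entry.

#include <cstdlib>

// Hypothetical stand-in for user_var_entry: 'value' is a NUL-terminated copy
// of the stored string, 'length' excludes the terminator.
struct stored_string
{
  char *value;
  unsigned long length;
};

static double as_double(const stored_string &s)
{
  return s.value ? std::atof(s.value) : 0.0;            // relies on the '\0'
}

static long long as_int(const stored_string &s)
{
  return s.value ? std::strtoll(s.value, 0, 10) : 0LL;  // relies on the '\0'
}

int main()
{
  char buf[]= "42.5";
  stored_string s= { buf, (unsigned long) (sizeof(buf) - 1) };
  return (as_double(s) == 42.5 && as_int(s) == 42) ? 0 : 1;
}
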
diff --git a/sql/item_func.h b/sql/item_func.h
index 86cf19d92f3..8086e65786d 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -898,6 +898,8 @@ class Item_func_set_user_var :public Item_func
enum Item_result cached_result_type;
LEX_STRING name;
user_var_entry *entry;
+ char buffer[MAX_FIELD_WIDTH];
+ String value;
public:
Item_func_set_user_var(LEX_STRING a,Item *b)
@@ -906,7 +908,7 @@ public:
double val();
longlong val_int();
String *val_str(String *str);
- void update_hash(void *ptr, uint length, enum Item_result type,
+ bool update_hash(void *ptr, uint length, enum Item_result type,
CHARSET_INFO *cs, Derivation dv);
bool update();
enum Item_result result_type () const { return cached_result_type; }
@@ -925,7 +927,6 @@ class Item_func_get_user_var :public Item_func
public:
Item_func_get_user_var(LEX_STRING a):
Item_func(), name(a) {}
- user_var_entry *get_entry();
double val();
longlong val_int();
String *val_str(String* str);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 86efde096c5..c8ee64dc707 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -350,7 +350,7 @@ void Item_func_concat::fix_length_and_dec()
String *Item_func_des_encrypt::val_str(String *str)
{
#ifdef HAVE_OPENSSL
- des_cblock ivec;
+ DES_cblock ivec;
struct st_des_keyblock keyblock;
struct st_des_keyschedule keyschedule;
const char *append_str="********";
@@ -390,9 +390,9 @@ String *Item_func_des_encrypt::val_str(String *str)
EVP_BytesToKey(EVP_des_ede3_cbc(),EVP_md5(),NULL,
(uchar*) keystr->ptr(), (int) keystr->length(),
1, (uchar*) &keyblock,ivec);
- des_set_key_unchecked(&keyblock.key1,keyschedule.ks1);
- des_set_key_unchecked(&keyblock.key2,keyschedule.ks2);
- des_set_key_unchecked(&keyblock.key3,keyschedule.ks3);
+ DES_set_key_unchecked(&keyblock.key1,&keyschedule.ks1);
+ DES_set_key_unchecked(&keyblock.key2,&keyschedule.ks2);
+ DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3);
}
/*
@@ -413,12 +413,12 @@ String *Item_func_des_encrypt::val_str(String *str)
tmp_value[0]=(char) (128 | key_number);
// Real encryption
bzero((char*) &ivec,sizeof(ivec));
- des_ede3_cbc_encrypt((const uchar*) (res->ptr()),
+ DES_ede3_cbc_encrypt((const uchar*) (res->ptr()),
(uchar*) (tmp_value.ptr()+1),
res_length,
- keyschedule.ks1,
- keyschedule.ks2,
- keyschedule.ks3,
+ &keyschedule.ks1,
+ &keyschedule.ks2,
+ &keyschedule.ks3,
&ivec, TRUE);
return &tmp_value;
@@ -432,8 +432,8 @@ error:
String *Item_func_des_decrypt::val_str(String *str)
{
#ifdef HAVE_OPENSSL
- des_key_schedule ks1, ks2, ks3;
- des_cblock ivec;
+ DES_key_schedule ks1, ks2, ks3;
+ DES_cblock ivec;
struct st_des_keyblock keyblock;
struct st_des_keyschedule keyschedule;
String *res= args[0]->val_str(str);
@@ -467,20 +467,20 @@ String *Item_func_des_decrypt::val_str(String *str)
(uchar*) keystr->ptr(),(int) keystr->length(),
1,(uchar*) &keyblock,ivec);
// Here we set all 64-bit keys (56 effective) one by one
- des_set_key_unchecked(&keyblock.key1,keyschedule.ks1);
- des_set_key_unchecked(&keyblock.key2,keyschedule.ks2);
- des_set_key_unchecked(&keyblock.key3,keyschedule.ks3);
+ DES_set_key_unchecked(&keyblock.key1,&keyschedule.ks1);
+ DES_set_key_unchecked(&keyblock.key2,&keyschedule.ks2);
+ DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3);
}
if (tmp_value.alloc(length-1))
goto error;
bzero((char*) &ivec,sizeof(ivec));
- des_ede3_cbc_encrypt((const uchar*) res->ptr()+1,
+ DES_ede3_cbc_encrypt((const uchar*) res->ptr()+1,
(uchar*) (tmp_value.ptr()),
length-1,
- keyschedule.ks1,
- keyschedule.ks2,
- keyschedule.ks3,
+ &keyschedule.ks1,
+ &keyschedule.ks2,
+ &keyschedule.ks3,
&ivec, FALSE);
/* Restore old length of key */
if ((tail=(uint) (uchar) tmp_value[length-2]) > 8)
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index fd024daca08..3d6fd4c316d 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1096,6 +1096,7 @@ int subselect_indexsubquery_engine::exec()
/* Check if there exists a row with a null value in the index */
if ((error= (safe_index_read(tab) == 1)))
break;
+ }
}
}
}
diff --git a/sql/log.cc b/sql/log.cc
index 41184615508..27c7c64f9c8 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -983,7 +983,7 @@ void MYSQL_LOG::new_file(bool need_lock)
close(LOG_CLOSE_TO_BE_OPENED);
/*
- Note that at this point, log_type == LOG_CLOSED (important for is_open()).
+ Note that at this point, log_type != LOG_CLOSED (important for is_open()).
*/
open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type,
@@ -1463,9 +1463,9 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
/*
Now this Query_log_event has artificial log_pos 0. It must be adjusted
to reflect the real position in the log. Not doing it would confuse the
- slave: it would prevent this one from knowing where he is in the master's
- binlog, which would result in wrong positions being shown to the user,
- MASTER_POS_WAIT undue waiting etc.
+ slave: it would prevent this one from knowing where he is in the
+ master's binlog, which would result in wrong positions being shown to
+ the user, MASTER_POS_WAIT undue waiting etc.
*/
qinfo.set_log_pos(this);
if (qinfo.write(&log_file))
diff --git a/sql/log_event.cc b/sql/log_event.cc
index b5fd78c06a9..2050be0e6de 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -121,14 +121,10 @@ static inline char* slave_load_file_stem(char*buf, uint file_id,
/*
- cleanup_load_tmpdir()
-
Delete all temporary files used for SQL_LOAD.
- TODO
- - When we get a 'server start' event, we should only remove
- the files associated with the server id that just started.
- Easily fixable by adding server_id as a prefix to the log files.
+ SYNOPSIS
+ cleanup_load_tmpdir()
*/
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
@@ -137,15 +133,28 @@ static void cleanup_load_tmpdir()
MY_DIR *dirp;
FILEINFO *file;
uint i;
- char fname[FN_REFLEN];
+ char fname[FN_REFLEN], prefbuf[31], *p;
if (!(dirp=my_dir(slave_load_tmpdir,MYF(MY_WME))))
return;
+ /*
+ When we are deleting temporary files, we should only remove
+ the files associated with the server id of our server.
+ We don't use event_server_id here because since we've disabled
+ direct binlogging of Create_file/Append_file/Exec_load events
+ we cannot meet Start_log event in the middle of events from one
+ LOAD DATA.
+ */
+ p= strmake(prefbuf,"SQL_LOAD-",9);
+ p= int10_to_str(::server_id, p, 10);
+ *(p++)= '-';
+ *p= 0;
+
for (i=0 ; i < (uint)dirp->number_off_files; i++)
{
file=dirp->dir_entry+i;
- if (is_prefix(file->name,"SQL_LOAD-"))
+ if (is_prefix(file->name, prefbuf))
{
fn_format(fname,file->name,slave_load_tmpdir,"",MY_UNPACK_FILENAME);
my_delete(fname, MYF(0));
@@ -1096,11 +1105,10 @@ int Start_log_event::exec_event(struct st_relay_log_info* rli)
*/
if (thd->options & OPTION_BEGIN)
{
- slave_print_error(rli, 0,
- "there is an unfinished transaction in the relay log \
-(could find neither COMMIT nor ROLLBACK in the relay log); it could be that \
-the master died while writing the transaction to its binary log. Now the slave \
-is rolling back the transaction.");
+ slave_print_error(rli, 0, "\
+Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \
+A probable cause is that the master died while writing the transaction to \
+its binary log.");
return(1);
}
break;
@@ -1882,8 +1890,8 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli)
ROTATE (a fake one)
...
COMMIT or ROLLBACK
- In that case, we don't want to touch the coordinates which correspond to the
- beginning of the transaction.
+ In that case, we don't want to touch the coordinates which correspond to
+ the beginning of the transaction.
*/
if (!(thd->options & OPTION_BEGIN))
{
@@ -3065,6 +3073,16 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
}
goto err;
}
+ /*
+ We have an open file descriptor to the .info file; we need to close it
+ or Windows will refuse to delete the file in my_delete().
+ */
+ if (fd >= 0)
+ {
+ my_close(fd, MYF(0));
+ end_io_cache(&file);
+ fd= -1;
+ }
(void) my_delete(fname, MYF(MY_WME));
memcpy(p, ".data", 6);
(void) my_delete(fname, MYF(MY_WME));
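
The prefix built above with strmake() and int10_to_str() evaluates to "SQL_LOAD-<server_id>-", so cleanup_load_tmpdir() now removes only this server's LOAD DATA temporaries. A minimal standalone sketch of the same construction follows; strmake() and int10_to_str() are MySQL's own string helpers, and plain snprintf() merely stands in for them here.

#include <cstdio>

// Build the per-server prefix matched against file names in slave_load_tmpdir.
static void make_load_prefix(char *buf, std::size_t buflen,
                             unsigned long server_id)
{
  std::snprintf(buf, buflen, "SQL_LOAD-%lu-", server_id);
}

int main()
{
  char prefbuf[31];                        // same size as in the hunk above
  make_load_prefix(prefbuf, sizeof(prefbuf), 1);
  std::printf("%s\n", prefbuf);            // prints: SQL_LOAD-1-
  return 0;
}
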
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index d6e6256c388..316810b1910 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -537,11 +537,11 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length,
#include <openssl/des.h>
struct st_des_keyblock
{
- des_cblock key1, key2, key3;
+ DES_cblock key1, key2, key3;
};
struct st_des_keyschedule
{
- des_key_schedule ks1, ks2, ks3;
+ DES_key_schedule ks1, ks2, ks3;
};
extern char *des_key_file;
extern struct st_des_keyschedule des_keyschedule[10];
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 17b4dad9d1c..c6af4ad5c12 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -60,7 +60,9 @@
char pstack_file_name[80];
#endif /* __linux__ */
-#if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ)
+/* HAVE_purify is tested below because this speeds up the shutdown of MySQL */
+
+#if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ) || defined(HAVE_purify) && defined(__linux__)
#define HAVE_CLOSE_SERVER_SOCK 1
#endif
@@ -517,12 +519,14 @@ static void close_connections(void)
struct timespec abstime;
int error;
LINT_INIT(error);
+ DBUG_PRINT("info",("Waiting for select_thread"));
+
#ifndef DONT_USE_THR_ALARM
if (pthread_kill(select_thread,THR_CLIENT_ALARM))
break; // allready dead
#endif
set_timespec(abstime, 2);
- for (uint tmp=0 ; tmp < 10 ; tmp++)
+ for (uint tmp=0 ; tmp < 10 && select_thread_in_use; tmp++)
{
error=pthread_cond_timedwait(&COND_thread_count,&LOCK_thread_count,
&abstime);
@@ -682,8 +686,8 @@ static void close_server_sock()
VOID(shutdown(tmp_sock,2));
#if defined(__NETWARE__)
/*
- The following code is disabled for normal systems as it causes MySQL
- AIX 4.3 during shutdown (not tested, but likely)
+ The following code is disabled for normal systems as it may cause MySQL
+ to hang on AIX 4.3 during shutdown
*/
DBUG_PRINT("info",("calling closesocket on unix/IP socket"));
VOID(closesocket(tmp_sock));
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 7c943d4ae53..2fe8946410f 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -734,7 +734,8 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db,
if (!tables_ok(thd, &table))
continue;
}
- if ((error= fetch_master_table(thd, db, table_name, mi, mysql)))
+ /* download master's table and overwrite slave's table */
+ if ((error= fetch_master_table(thd, db, table_name, mi, mysql, 1)))
return error;
}
return 0;
@@ -836,8 +837,11 @@ int load_master_data(THD* thd)
char* db = row[0];
/*
- Do not replicate databases excluded by rules
- also skip mysql database - in most cases the user will
+ Do not replicate databases excluded by rules. We also test
+ replicate_wild_*_table rules (replicate_wild_ignore_table='db1.%' will
+    be considered as "ignore the 'db1' database as a whole", as it already
+ works for CREATE DATABASE and DROP DATABASE).
+ Also skip 'mysql' database - in most cases the user will
mess up and not exclude mysql database with the rules when
he actually means to - in this case, he is up for a surprise if
his priv tables get dropped and downloaded from master
@@ -847,14 +851,14 @@ int load_master_data(THD* thd)
*/
if (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
+ !db_ok_with_wild_table(db) ||
!strcmp(db,"mysql"))
{
*cur_table_res = 0;
continue;
}
- if (mysql_rm_db(thd, db, 1,1) ||
- mysql_create_db(thd, db, 0, 1))
+ if (mysql_create_db(thd, db, HA_LEX_CREATE_IF_NOT_EXISTS, 1))
{
send_error(thd, 0, 0);
cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
diff --git a/sql/slave.cc b/sql/slave.cc
index 641707aab2f..a221abec2b5 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -72,7 +72,7 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
void* thread_killed_arg);
static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name);
+ const char* table_name, bool overwrite);
static int check_master_version(MYSQL* mysql, MASTER_INFO* mi);
@@ -1102,12 +1102,22 @@ static int check_master_version(MYSQL* mysql, MASTER_INFO* mi)
return 0;
}
+/*
+ Used by fetch_master_table (used by LOAD TABLE tblname FROM MASTER and LOAD
+ DATA FROM MASTER). Drops the table (if 'overwrite' is true) and recreates it
+ from the dump. Honours replication inclusion/exclusion rules.
+
+ RETURN VALUES
+ 0 success
+ 1 error
+*/
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name)
+ const char* table_name, bool overwrite)
{
ulong packet_len;
char *query;
+ char* save_db;
Vio* save_vio;
HA_CHECK_OPT check_opt;
TABLE_LIST tables;
@@ -1144,12 +1154,12 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
thd->query_error = 0;
thd->net.no_send_ok = 1;
- /* we do not want to log create table statement */
+ /* Create the table. We do not want to log the "create table" statement */
save_options = thd->options;
thd->options &= ~(ulong) (OPTION_BIN_LOG);
thd->proc_info = "Creating table from master dump";
// save old db in case we are creating in a different database
- char* save_db = thd->db;
+ save_db = thd->db;
thd->db = (char*)db;
mysql_parse(thd, thd->query, packet_len); // run create table
thd->db = save_db; // leave things the way the were before
@@ -1158,11 +1168,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
if (thd->query_error)
goto err; // mysql_parse took care of the error send
- bzero((char*) &tables,sizeof(tables));
- tables.db = (char*)db;
- tables.alias= tables.real_name= (char*)table_name;
- tables.lock_type = TL_WRITE;
thd->proc_info = "Opening master dump table";
+ tables.lock_type = TL_WRITE;
if (!open_ltable(thd, &tables, TL_WRITE))
{
send_error(thd,0,0); // Send error from open_ltable
@@ -1172,10 +1179,11 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
file = tables.table->file;
thd->proc_info = "Reading master dump table data";
+ /* Copy the data file */
if (file->net_read_dump(net))
{
net_printf(thd, ER_MASTER_NET_READ);
- sql_print_error("create_table_from_dump::failed in\
+ sql_print_error("create_table_from_dump: failed in\
handler::net_read_dump()");
goto err;
}
@@ -1190,6 +1198,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
*/
save_vio = thd->net.vio;
thd->net.vio = 0;
+ /* Rebuild the index file from the copied data file (with REPAIR) */
error=file->repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
if (error)
@@ -1203,7 +1212,7 @@ err:
int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
- MASTER_INFO *mi, MYSQL *mysql)
+ MASTER_INFO *mi, MYSQL *mysql, bool overwrite)
{
int error= 1;
const char *errmsg=0;
@@ -1235,8 +1244,9 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
errmsg= "Failed on table dump request";
goto err;
}
- if (create_table_from_dump(thd, mysql, db_name, table_name))
- goto err; // create_table_from_dump will have sent the error already
+ if (create_table_from_dump(thd, mysql, db_name,
+ table_name, overwrite))
+      goto err; // create_table_from_dump has sent the error already
error = 0;
err:
diff --git a/sql/slave.h b/sql/slave.h
index b52648005d3..05cf7a23b0f 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -199,7 +199,8 @@ typedef struct st_relay_log_info
/*
Handling of the relay_log_space_limit optional constraint.
ignore_log_space_limit is used to resolve a deadlock between I/O and SQL
- threads, it makes the I/O thread temporarily forget about the constraint
+ threads, the SQL thread sets it to unblock the I/O thread and make it
+ temporarily forget about the constraint.
*/
ulonglong log_space_limit,log_space_total;
bool ignore_log_space_limit;
@@ -478,9 +479,9 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock,
int mysql_table_dump(THD* thd, const char* db,
const char* tbl_name, int fd = -1);
-/* retrieve non-exitent table from master */
+/* retrieve table from master and copy to slave */
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
- MASTER_INFO* mi, MYSQL* mysql);
+ MASTER_INFO* mi, MYSQL* mysql, bool overwrite);
void table_rule_ent_hash_to_str(String* s, HASH* h);
void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 4b4e4e7eb8e..a926c6e66fe 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -499,15 +499,13 @@ void close_temporary_tables(THD *thd)
{
// skip temporary tables not created directly by the user
if (table->real_name[0] != '#')
- {
- /*
- Here we assume table_cache_key always starts
- with \0 terminated db name
- */
found_user_tables = 1;
- }
- end = strxmov(end,table->table_cache_key,".",
- table->real_name,",", NullS);
+ /*
+ Here we assume table_cache_key always starts
+ with \0 terminated db name
+ */
+ end = strxmov(end,"`",table->table_cache_key,"`",
+ ".`",table->real_name,"`,", NullS);
}
next=table->next;
close_temporary(table);
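
The strxmov() change above backtick-quotes each dropped temporary table, so the statement later written to the binlog names tables as `db`.`name` and replays safely even when an identifier contains unusual characters; as the comment says, table_cache_key is assumed to start with the NUL-terminated database name. A small illustration of the per-table fragment, with hypothetical values:

#include <cstdio>

int main()
{
  // Hypothetical stand-ins for table->table_cache_key (db part) and
  // table->real_name.
  const char *db= "test", *name= "t1";
  char buf[128];
  std::snprintf(buf, sizeof(buf), "`%s`.`%s`,", db, name);
  std::printf("%s\n", buf);                // prints: `test`.`t1`,
  return 0;
}
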
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 4fb12523086..0596e514c1a 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1034,9 +1034,14 @@ class user_var_entry
char *value;
ulong length, update_query_id, used_query_id;
Item_result type;
+
+ double val(my_bool *null_value);
+ longlong val_int(my_bool *null_value);
+ String *val_str(my_bool *null_value, String *str, uint decimals);
DTCollation collation;
};
+
/* Class for unique (removing of duplicates) */
class Unique :public Sql_alloc
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 60e0a7c7e94..0a5c544c2e7 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -308,22 +308,31 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (!opt_old_rpl_compat && mysql_bin_log.is_open())
{
+ /*
+ Make sure last block (the one which caused the error) gets logged.
+      This is needed because otherwise, after the Delete_file_log_event has
+      been written (to the binlog, not to read_info, which is a cache), the
+      bad block would remain in read_info (because
+ pre_read is not called at the end of the last block; remember pre_read
+ is called whenever a new block is read from disk).
+ At the end of mysql_load(), the destructor of read_info will call
+ end_io_cache() which will flush read_info, so we will finally have
+ this in the binlog:
+      Append_block # The last successful block
+ Delete_file
+ Append_block # The failing block
+ which is nonsense.
+      Or, for a small file, we could end up with just
+      Create_file  # The failing block
+      which is also nonsense (Delete_file is not written in this case
+      because Create_file has not been written yet; then, when read_info
+      is destroyed, end_io_cache() is called, which at that point writes
+      Create_file).
+ */
+ read_info.end_io_cache();
+ /* If the file was not empty, wrote_create_file is true */
if (lf_info.wrote_create_file)
{
- /*
- Make sure last block (the one which caused the error) gets logged.
- This is needed because otherwise after write of
- (to the binlog, not to read_info (which is a cache))
- Delete_file_log_event the bad block will remain in read_info.
- At the end of mysql_load(), the destructor of read_info will call
- end_io_cache() which will flush read_info, so we will finally have
- this in the binlog:
- Append_block # The last successfull block
- Delete_file
- Append_block # The failing block
- which is nonsense.
- */
- read_info.end_io_cache();
Delete_file_log_event d(thd, db, log_delayed);
mysql_bin_log.write(&d);
}
@@ -355,7 +364,12 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
}
else
{
- read_info.end_io_cache(); // make sure last block gets logged
+ /*
+ As already explained above, we need to call end_io_cache() or the last
+ block will be logged only after Execute_load_log_event (which is wrong),
+ when read_info is destroyed.
+ */
+ read_info.end_io_cache();
if (lf_info.wrote_create_file)
{
Execute_load_log_event e(thd, db, log_delayed);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index aa91e307095..b36d4041950 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1987,9 +1987,12 @@ mysql_execute_command(THD *thd)
break;
}
LOCK_ACTIVE_MI;
- // fetch_master_table will send the error to the client on failure
+ /*
+ fetch_master_table will send the error to the client on failure.
+ Give error if the table already exists.
+ */
if (!fetch_master_table(thd, tables->db, tables->real_name,
- active_mi, 0))
+ active_mi, 0, 0))
{
send_ok(thd);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 398c57e9037..a97635c05bc 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2574,7 +2574,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
best=best_time=records=DBL_MAX;
KEYUSE *best_key=0;
uint best_max_key_part=0;
- my_bool found_constrain= 0;
+ my_bool found_constraint= 0;
if (s->keyuse)
{ /* Use key if possible */
@@ -2639,7 +2639,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
}
else
{
- found_constrain= 1;
+ found_constraint= 1;
/*
Check if we found full key
*/
@@ -2801,29 +2801,50 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
s->table->used_keys && best_key) &&
!(s->table->force_index && best_key))
{ // Check full join
- ha_rows rnd_records= s->found_records;
- if (s->on_expr)
- {
- tmp=rows2double(rnd_records); // Can't use read cache
- }
- else
- {
- tmp=(double) s->read_time;
- /* Calculate time to read previous rows through cache */
- tmp*=(1.0+floor((double) cache_record_length(join,idx)*
- record_count /
- (double) thd->variables.join_buff_size));
- }
-
- /*
- If there is a restriction on the table, assume that 25% of the
- rows can be skipped on next part.
- This is to force tables that this table depends on before this
- table
- */
- if (found_constrain)
- rnd_records-= rnd_records/4;
+ ha_rows rnd_records= s->found_records;
+ /* Estimate cost of reading table. */
+ tmp= s->table->file->scan_time();
+ /*
+	If there is a restriction on the table, assume that 25% of the
+	rows can be skipped by the next part of the join.
+	This is to force tables that this table depends on to be read
+	before this table.
+ */
+ if (found_constraint)
+ rnd_records-= rnd_records/4;
+
+ if (s->on_expr) // Can't use join cache
+ {
+ tmp= record_count *
+ /* We have to read the whole table for each record */
+ (tmp +
+ /*
+	     And, for each record, we have to skip rows which do not
+	     satisfy the join condition.
+ */
+ (s->records - rnd_records)/(double) TIME_FOR_COMPARE);
+ }
+ else
+ {
+	  /* We read the table as many times as the join buffer becomes full. */
+ tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
+ record_count /
+ (double) thd->variables.join_buff_size));
+ /*
+	    We don't make a full Cartesian product between rows in the scanned
+	    table and existing records, because when we read the table we skip
+	    all rows from the scanned table which do not satisfy the join
+	    condition (see flush_cached_records for details). Here we take
+	    into account the cost to read and skip these records.
+ */
+ tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+ }
+ /*
+	We estimate the cost of evaluating the WHERE clause for the found
+	records as record_count * rnd_records / TIME_FOR_COMPARE. This cost,
+	plus tmp, gives us the total cost of using TABLE SCAN.
+ */
if (best == DBL_MAX ||
(tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
best + record_count/(double) TIME_FOR_COMPARE*records))
@@ -4453,6 +4474,8 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
new_field->field_name=item->name;
if (org_field->maybe_null())
new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join
+ if (org_field->type()==FIELD_TYPE_VAR_STRING)
+ table->db_create_options|= HA_OPTION_PACK_RECORD;
}
return new_field;
}
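
For reference, the revised full table-scan cost in the find_best() hunk of sql_select.cc above can be summarized by the sketch below. It uses the same quantities as the hunk (scan_time(), TIME_FOR_COMPARE, join_buff_size) but none of the server's internal structures, so treat it as an illustration of the formula rather than the server code.

#include <cmath>

// Sketch of the table-scan cost computed in find_best(); rnd_records is
// assumed to already have the 25% reduction applied when found_constraint
// is set, as in the hunk above.
static double full_scan_cost(double scan_time,           // s->table->file->scan_time()
                             double record_count,        // rows from prior tables
                             double records,             // s->records
                             double rnd_records,         // s->found_records (adjusted)
                             double cache_record_length, // cache_record_length(join,idx)
                             double join_buff_size,
                             double time_for_compare,    // TIME_FOR_COMPARE
                             bool   has_on_expr)         // s->on_expr != 0
{
  double tmp= scan_time;
  if (has_on_expr)
  {
    // No join cache: the whole table is read for each row of the prior
    // tables, and rows failing the join condition are skipped each time.
    tmp= record_count * (tmp + (records - rnd_records) / time_for_compare);
  }
  else
  {
    // Join cache: the table is re-read each time the join buffer fills,
    // then rows failing the join condition are skipped once.
    tmp*= 1.0 + std::floor(cache_record_length * record_count / join_buff_size);
    tmp+= (records - rnd_records) / time_for_compare;
  }
  // The caller then adds record_count * rnd_records / TIME_FOR_COMPARE
  // (cost of evaluating the WHERE clause) when comparing against 'best'.
  return tmp;
}

int main()
{
  // Illustrative numbers only; TIME_FOR_COMPARE is taken as 5 here.
  double cost= full_scan_cost(100.0, 10.0, 1000.0, 750.0,
                              40.0, 131072.0, 5.0, false);
  return cost > 0 ? 0 : 1;
}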