summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorunknown <mikael@c-870ae253.1238-1-64736c10.cust.bredbandsbolaget.se>2006-04-21 09:30:19 -0400
committerunknown <mikael@c-870ae253.1238-1-64736c10.cust.bredbandsbolaget.se>2006-04-21 09:30:19 -0400
commit34a11a322d5a534cceec4d13293a491cf5077c0d (patch)
tree3339db78538bb6460acbe9b5f921a17b228744fd /sql
parent328da025e0f5d69507d865977f9b68f4af9126d9 (diff)
parent17e3ee35531d0003de31093570e64f604bd8c4fa (diff)
downloadmariadb-git-34a11a322d5a534cceec4d13293a491cf5077c0d.tar.gz
Merge mronstrom@bk-internal.mysql.com:/home/bk/bugs/bug18198
into c-870ae253.1238-1-64736c10.cust.bredbandsbolaget.se:/home/pappa/bug18198 sql/item_timefunc.h: Auto merged sql/partition_info.cc: Auto merged sql/share/errmsg.txt: manual merge
Diffstat (limited to 'sql')
-rw-r--r--sql/event_executor.cc2
-rw-r--r--sql/field.cc63
-rw-r--r--sql/field.h14
-rw-r--r--sql/ha_archive.cc89
-rw-r--r--sql/ha_archive.h5
-rw-r--r--sql/ha_ndbcluster.cc151
-rw-r--r--sql/ha_ndbcluster.h3
-rw-r--r--sql/ha_ndbcluster_binlog.cc405
-rw-r--r--sql/ha_ndbcluster_binlog.h6
-rw-r--r--sql/ha_partition.cc22
-rw-r--r--sql/ha_partition.h3
-rw-r--r--sql/handler.h4
-rw-r--r--sql/hostname.cc2
-rw-r--r--sql/item_cmpfunc.cc16
-rw-r--r--sql/item_row.cc16
-rw-r--r--sql/item_row.h3
-rw-r--r--sql/item_timefunc.cc35
-rw-r--r--sql/item_timefunc.h1
-rw-r--r--sql/item_xmlfunc.cc80
-rw-r--r--sql/key.cc3
-rw-r--r--sql/lex.h2
-rw-r--r--sql/log.cc48
-rw-r--r--sql/log_event.cc31
-rw-r--r--sql/log_event.h4
-rw-r--r--sql/my_lock.c2
-rw-r--r--sql/mysql_priv.h8
-rw-r--r--sql/mysqld.cc151
-rw-r--r--sql/net_serv.cc10
-rw-r--r--sql/opt_range.cc2
-rw-r--r--sql/partition_info.cc12
-rw-r--r--sql/repl_failsafe.cc2
-rw-r--r--sql/set_var.cc39
-rw-r--r--sql/set_var.h57
-rw-r--r--sql/share/errmsg.txt8
-rw-r--r--sql/slave.cc2
-rw-r--r--sql/sql_base.cc4
-rw-r--r--sql/sql_cache.cc5
-rw-r--r--sql/sql_class.cc116
-rw-r--r--sql/sql_class.h29
-rw-r--r--sql/sql_cursor.cc3
-rw-r--r--sql/sql_delete.cc1
-rw-r--r--sql/sql_handler.cc1
-rw-r--r--sql/sql_insert.cc4
-rw-r--r--sql/sql_lex.cc2
-rw-r--r--sql/sql_load.cc4
-rw-r--r--sql/sql_parse.cc21
-rw-r--r--sql/sql_partition.cc19
-rw-r--r--sql/sql_plugin.cc12
-rw-r--r--sql/sql_prepare.cc17
-rw-r--r--sql/sql_repl.cc6
-rw-r--r--sql/sql_show.cc5
-rw-r--r--sql/sql_table.cc57
-rw-r--r--sql/sql_yacc.yy7
-rw-r--r--sql/udf_example.cc2
-rw-r--r--sql/unireg.cc2
-rwxr-xr-xsql/watchdog_mysqld126
56 files changed, 1039 insertions, 705 deletions
diff --git a/sql/event_executor.cc b/sql/event_executor.cc
index 498760bb6fd..21464dd777b 100644
--- a/sql/event_executor.cc
+++ b/sql/event_executor.cc
@@ -291,7 +291,7 @@ init_event_thread(THD* thd)
DBUG_RETURN(-1);
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
diff --git a/sql/field.cc b/sql/field.cc
index 1f67f83aabd..9c504f186b3 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -5989,7 +5989,7 @@ int Field_str::store(double nr)
uint Field::is_equal(create_field *new_field)
{
- return (new_field->sql_type == type());
+ return (new_field->sql_type == real_type());
}
@@ -6001,7 +6001,7 @@ uint Field_str::is_equal(create_field *new_field)
(flags & (BINCMP_FLAG | BINARY_FLAG))))
return 0; /* One of the fields is binary and the other one isn't */
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
new_field->charset == field_charset &&
new_field->length == max_length());
}
@@ -6798,7 +6798,7 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
uint Field_varstring::is_equal(create_field *new_field)
{
- if (new_field->sql_type == type() &&
+ if (new_field->sql_type == real_type() &&
new_field->charset == field_charset)
{
if (new_field->length == max_length())
@@ -7957,12 +7957,12 @@ bool Field_num::eq_def(Field *field)
uint Field_num::is_equal(create_field *new_field)
{
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
((new_field->flags & UNSIGNED_FLAG) == (uint) (flags &
UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->length >= max_length()));
+ (new_field->length <= max_length()));
}
@@ -7998,9 +7998,10 @@ uint Field_num::is_equal(create_field *new_field)
Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg, const char *field_name_arg)
- : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg,
+ : Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg),
- bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7)
+ bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7),
+ bytes_in_rec(len_arg / 8)
{
/*
Ensure that Field::eq() can distinguish between two different bit fields.
@@ -8036,14 +8037,14 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
int delta;
for (; length && !*from; from++, length--); // skip left 0's
- delta= field_length - length;
+ delta= bytes_in_rec - length;
if (delta < -1 ||
(delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) ||
(!bit_len && delta < 0))
{
set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len);
- memset(ptr, 0xff, field_length);
+ memset(ptr, 0xff, bytes_in_rec);
if (table->in_use->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
else
@@ -8071,7 +8072,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
set_rec_bits((uchar) *from, bit_ptr, bit_ofs, bit_len);
from++;
}
- memcpy(ptr, from, field_length);
+ memcpy(ptr, from, bytes_in_rec);
}
return 0;
}
@@ -8112,10 +8113,10 @@ longlong Field_bit::val_int(void)
if (bit_len)
{
bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
- bits<<= (field_length * 8);
+ bits<<= (bytes_in_rec * 8);
}
- switch (field_length) {
+ switch (bytes_in_rec) {
case 0: return bits;
case 1: return bits | (ulonglong) (uchar) ptr[0];
case 2: return bits | mi_uint2korr(ptr);
@@ -8124,7 +8125,7 @@ longlong Field_bit::val_int(void)
case 5: return bits | mi_uint5korr(ptr);
case 6: return bits | mi_uint6korr(ptr);
case 7: return bits | mi_uint7korr(ptr);
- default: return mi_uint8korr(ptr + field_length - sizeof(longlong));
+ default: return mi_uint8korr(ptr + bytes_in_rec - sizeof(longlong));
}
}
@@ -8206,7 +8207,7 @@ int Field_bit::cmp_offset(uint row_offset)
if ((flag= (int) (bits_a - bits_b)))
return flag;
}
- return memcmp(ptr, ptr + row_offset, field_length);
+ return memcmp(ptr, ptr + row_offset, bytes_in_rec);
}
@@ -8218,7 +8219,7 @@ void Field_bit::get_key_image(char *buff, uint length, imagetype type)
*buff++= bits;
length--;
}
- memcpy(buff, ptr, min(length, field_length));
+ memcpy(buff, ptr, min(length, bytes_in_rec));
}
@@ -8226,22 +8227,22 @@ void Field_bit::sql_type(String &res) const
{
CHARSET_INFO *cs= res.charset();
ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
- "bit(%d)",
- (int) field_length * 8 + bit_len);
+ "bit(%d)", (int) field_length);
res.length((uint) length);
}
char *Field_bit::pack(char *to, const char *from, uint max_length)
{
- uint length= min(field_length + (bit_len > 0), max_length);
+ DBUG_ASSERT(max_length);
+ uint length;
if (bit_len)
{
uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
*to++= bits;
- length--;
}
- memcpy(to, from, length);
+ length= min(bytes_in_rec, max_length - (bit_len > 0));
+ memcpy(to, from, length);
return to + length;
}
@@ -8253,8 +8254,8 @@ const char *Field_bit::unpack(char *to, const char *from)
set_rec_bits(*from, bit_ptr, bit_ofs, bit_len);
from++;
}
- memcpy(to, from, field_length);
- return from + field_length;
+ memcpy(to, from, bytes_in_rec);
+ return from + bytes_in_rec;
}
@@ -8267,26 +8268,25 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
enum utype unireg_check_arg,
const char *field_name_arg)
:Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0, 0,
- unireg_check_arg, field_name_arg),
- create_length(len_arg)
+ unireg_check_arg, field_name_arg)
{
bit_len= 0;
- field_length= ((len_arg + 7) & ~7) / 8;
+ bytes_in_rec= (len_arg + 7) / 8;
}
int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
int delta;
- uchar bits= create_length & 7;
+ uchar bits= field_length & 7;
for (; length && !*from; from++, length--); // skip left 0's
- delta= field_length - length;
+ delta= bytes_in_rec - length;
if (delta < 0 ||
(delta == 0 && bits && (uint) (uchar) *from >= (uint) (1 << bits)))
{
- memset(ptr, 0xff, field_length);
+ memset(ptr, 0xff, bytes_in_rec);
if (bits)
*ptr&= ((1 << bits) - 1); /* set first byte */
if (table->in_use->really_abort_on_warning())
@@ -8305,7 +8305,7 @@ void Field_bit_as_char::sql_type(String &res) const
{
CHARSET_INFO *cs= res.charset();
ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
- "bit(%d)", (int) create_length);
+ "bit(%d)", (int) field_length);
res.length((uint) length);
}
@@ -9033,11 +9033,6 @@ create_field::create_field(Field *old_field,Field *orig_field)
geom_type= ((Field_geom*)old_field)->geom_type;
break;
#endif
- case FIELD_TYPE_BIT:
- length= (old_field->key_type() == HA_KEYTYPE_BIT) ?
- ((Field_bit *) old_field)->bit_len + length * 8 :
- ((Field_bit_as_char *) old_field)->create_length;
- break;
default:
break;
}
diff --git a/sql/field.h b/sql/field.h
index 936babdd26a..b473100eaab 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1346,16 +1346,17 @@ public:
uchar *bit_ptr; // position in record where 'uneven' bits store
uchar bit_ofs; // offset to 'uneven' high bits
uint bit_len; // number of 'uneven' high bits
+ uint bytes_in_rec;
Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg, const char *field_name_arg);
enum_field_types type() const { return FIELD_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
- uint32 key_length() const { return (uint32) field_length + (bit_len > 0); }
- uint32 max_length() { return (uint32) field_length * 8 + bit_len; }
+ uint32 key_length() const { return (uint32) (field_length + 7) / 8; }
+ uint32 max_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
Item_result result_type () const { return INT_RESULT; }
- void reset(void) { bzero(ptr, field_length); }
+ void reset(void) { bzero(ptr, bytes_in_rec); }
int store(const char *to, uint length, CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -1378,9 +1379,8 @@ public:
{ Field_bit::store(buff, length, &my_charset_bin); }
void sort_string(char *buff, uint length)
{ get_key_image(buff, length, itRAW); }
- uint32 pack_length() const
- { return (uint32) field_length + (bit_len > 0); }
- uint32 pack_length_in_rec() const { return field_length; }
+ uint32 pack_length() const { return (uint32) (field_length + 7) / 8; }
+ uint32 pack_length_in_rec() const { return bytes_in_rec; }
void sql_type(String &str) const;
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
const char *unpack(char* to, const char *from);
@@ -1402,12 +1402,10 @@ public:
class Field_bit_as_char: public Field_bit {
public:
- uchar create_length;
Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg);
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
- uint32 max_length() { return (uint32) create_length; }
uint size_of() const { return sizeof(*this); }
int store(const char *to, uint length, CHARSET_INFO *charset);
int store(double nr) { return Field_bit::store(nr); }
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index 403855b6a01..942faaae517 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -63,8 +63,7 @@
pool. For MyISAM its a question of how much the file system caches the
MyISAM file. With enough free memory MyISAM is faster. Its only when the OS
doesn't have enough memory to cache entire table that archive turns out
- to be any faster. For writes it is always a bit slower then MyISAM. It has no
- internal limits though for row length.
+ to be any faster.
Examples between MyISAM (packed) and Archive.
@@ -81,11 +80,8 @@
TODO:
Add bzip optional support.
Allow users to set compression level.
- Add truncate table command.
Implement versioning, should be easy.
Allow for errors, find a way to mark bad rows.
- Talk to the azip guys, come up with a writable format so that updates are doable
- without switching to a block method.
Add optional feature so that rows can be flushed at interval (which will cause less
compression but may speed up ordered searches).
Checkpoint the meta file to allow for faster rebuilds.
@@ -126,10 +122,12 @@ static HASH archive_open_tables;
#define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file
/*
- uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
+ uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN
+ + uchar
*/
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
- + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
+ + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + FN_REFLEN \
+ + sizeof(uchar)
/*
uchar + uchar
@@ -317,7 +315,8 @@ error:
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment,
- ulonglong *forced_flushes)
+ ulonglong *forced_flushes,
+ char *real_path)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
@@ -342,6 +341,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ptr+= sizeof(ulonglong); // Move past auto_increment
*forced_flushes= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past forced_flush
+ memmove(real_path, ptr, FN_REFLEN);
+ ptr+= FN_REFLEN; // Move past the possible location of the file
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
@@ -349,6 +350,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -368,6 +370,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment,
ulonglong forced_flushes,
+ char *real_path,
bool dirty)
{
uchar meta_buffer[META_BUFFER_SIZE];
@@ -388,6 +391,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ptr += sizeof(ulonglong);
int8store(ptr, forced_flushes);
ptr += sizeof(ulonglong);
+ // No matter what, we pad with nulls
+ if (real_path)
+ strncpy((char *)ptr, real_path, FN_REFLEN);
+ else
+ bzero(ptr, FN_REFLEN);
+ ptr += FN_REFLEN;
*ptr= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
(uint)ARCHIVE_CHECK_HEADER));
@@ -399,6 +408,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
auto_increment));
DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
forced_flushes));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
+ real_path));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
@@ -448,8 +459,12 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share->table_name_length= length;
share->table_name= tmp_name;
share->crashed= FALSE;
- fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ fn_format(share->data_file_name, table_name, "",
+ ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ fn_format(meta_file_name, table_name, "", ARM,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ DBUG_PRINT("info", ("archive opening (1) up write at %s",
+ share->data_file_name));
strmov(share->table_name,table_name);
/*
We will use this lock for rows.
@@ -457,6 +472,8 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
share->crashed= TRUE;
+ DBUG_PRINT("info", ("archive opening (1) up write at %s",
+ share->data_file_name));
/*
After we read, we set the file to dirty. When we close, we will do the
@@ -465,13 +482,21 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
*/
if (read_meta_file(share->meta_file, &share->rows_recorded,
&share->auto_increment_value,
- &share->forced_flushes))
+ &share->forced_flushes,
+ share->real_path))
share->crashed= TRUE;
else
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
+ share->real_path,
TRUE);
+ /*
+ Since we now possibly no real_path, we will use it instead if it exists.
+ */
+ if (*share->real_path)
+ fn_format(share->data_file_name, share->real_path, "", ARZ,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
@@ -527,6 +552,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
+ share->real_path,
share->crashed ? TRUE :FALSE);
if (azclose(&(share->archive_write)))
rc= 1;
@@ -566,7 +592,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
int rc= 0;
DBUG_ENTER("ha_archive::open");
- DBUG_PRINT("info", ("archive table was opened for crash %s",
+ DBUG_PRINT("info", ("archive table was opened for crash: %s",
(open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
share= get_share(name, table, &rc);
@@ -582,6 +608,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
thr_lock_data_init(&share->lock,&lock,NULL);
+ DBUG_PRINT("info", ("archive data_file_name %s", share->data_file_name));
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{
if (errno == EROFS || errno == EACCES)
@@ -679,18 +706,40 @@ int ha_archive::create(const char *name, TABLE *table_arg,
}
}
- write_meta_file(create_file, 0, auto_increment_value, 0, FALSE);
+ write_meta_file(create_file, 0, auto_increment_value, 0,
+ (char *)create_info->data_file_name,
+ FALSE);
my_close(create_file,MYF(0));
/*
We reuse name_buff since it is available.
*/
- if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ if (create_info->data_file_name)
{
- error= my_errno;
- goto error;
+ char linkname[FN_REFLEN];
+ DBUG_PRINT("info", ("archive will create stream file %s",
+ create_info->data_file_name));
+
+ fn_format(name_buff, create_info->data_file_name, "", ARZ,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ fn_format(linkname, name, "", ARZ,
+ MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ if ((create_file= my_create_with_symlink(linkname, name_buff, 0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ {
+ error= my_errno;
+ goto error;
+ }
+ }
+ else
+ {
+ if ((create_file= my_create(fn_format(name_buff, name,"", ARZ,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ {
+ error= my_errno;
+ goto error;
+ }
}
if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
{
@@ -1348,8 +1397,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
ha_archive::info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
- create_info->auto_increment_value=auto_increment_value;
+ create_info->auto_increment_value= auto_increment_value;
}
+ if (*share->real_path)
+ create_info->data_file_name= share->real_path;
}
diff --git a/sql/ha_archive.h b/sql/ha_archive.h
index 9b351b7e8da..c3f4e82d997 100644
--- a/sql/ha_archive.h
+++ b/sql/ha_archive.h
@@ -41,6 +41,7 @@ typedef struct st_archive_share {
ulonglong auto_increment_value;
ulonglong forced_flushes;
ulonglong mean_rec_length;
+ char real_path[FN_REFLEN];
} ARCHIVE_SHARE;
/*
@@ -102,10 +103,12 @@ public:
int get_row(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment,
- ulonglong *forced_flushes);
+ ulonglong *forced_flushes,
+ char *real_path);
int write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment,
ulonglong forced_flushes,
+ char *real_path,
bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a4193e4eb33..587eabb82d2 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -556,6 +556,14 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
ERR_PRINT(err);
switch (err.classification) {
case NdbError::SchemaError:
+ {
+ /* Close other open handlers not used by any thread */
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= m_dbname;
+ table_list.alias= table_list.table_name= m_tabname;
+ close_cached_tables(current_thd, 0, &table_list);
+
invalidate_dictionary_cache(TRUE);
if (err.code==284)
@@ -576,6 +584,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
DBUG_PRINT("info", ("Table exists but must have changed"));
}
break;
+ }
default:
break;
}
@@ -4411,7 +4420,7 @@ static int create_ndb_column(NDBCOL &col,
break;
case MYSQL_TYPE_BIT:
{
- int no_of_bits= field->field_length*8 + ((Field_bit *) field)->bit_len;
+ int no_of_bits= field->field_length;
col.setType(NDBCOL::Bit);
if (!no_of_bits)
col.setLength(1);
@@ -4480,6 +4489,21 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow table creation unless
+ schema distribution table is setup
+ ( unless it is a creation of the schema dist table itself )
+ */
+ if (!schema_share &&
+ !(strcmp(m_dbname, NDB_REP_DB) == 0 &&
+ strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif /* HAVE_NDB_BINLOG */
+
DBUG_PRINT("table", ("name: %s", m_tabname));
tab.setName(m_tabname);
tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE));
@@ -4687,8 +4711,9 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}
-int ha_ndbcluster::create_handler_files(const char *file)
+int ha_ndbcluster::create_handler_files(const char *file, HA_CREATE_INFO *info)
{
+ char path[FN_REFLEN];
const char *name;
Ndb* ndb;
const NDBTAB *tab;
@@ -4698,16 +4723,21 @@ int ha_ndbcluster::create_handler_files(const char *file)
DBUG_ENTER("create_handler_files");
+ DBUG_PRINT("enter", ("file: %s", file));
if (!(ndb= get_ndb()))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
NDBDICT *dict= ndb->getDictionary();
- if (!(tab= dict->getTable(m_tabname)))
+ if (!info->frm_only)
DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create
+ set_dbname(file);
+ set_tabname(file);
+ DBUG_PRINT("info", ("m_dbname: %s, m_tabname: %s", m_dbname, m_tabname));
+ if (!(tab= dict->getTable(m_tabname)))
+ DBUG_RETURN(0); // Unkown table, must be temporary table
+
DBUG_ASSERT(get_ndb_share_state(m_share) == NSS_ALTERED);
- name= table->s->normalized_path.str;
- DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, name));
- if (readfrm(name, &data, &length) ||
+ if (readfrm(file, &data, &length) ||
packfrm(data, length, &pack_data, &pack_length))
{
DBUG_PRINT("info", ("Missing frm for %s", m_tabname));
@@ -4723,6 +4753,7 @@ int ha_ndbcluster::create_handler_files(const char *file)
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
}
+
set_ndb_share_state(m_share, NSS_INITIAL);
free_share(&m_share); // Decrease ref_count
@@ -4830,6 +4861,15 @@ int ha_ndbcluster::create_ndb_index(const char *name,
}
/*
+ Prepare for an on-line alter table
+*/
+void ha_ndbcluster::prepare_for_alter()
+{
+ ndbcluster_get_share(m_share); // Increase ref_count
+ set_ndb_share_state(m_share, NSS_ALTERED);
+}
+
+/*
Add an index on-line to a table
*/
int ha_ndbcluster::add_index(TABLE *table_arg,
@@ -4841,7 +4881,7 @@ int ha_ndbcluster::add_index(TABLE *table_arg,
int error= 0;
uint idx;
- DBUG_ASSERT(m_share->state == NSS_INITIAL);
+ DBUG_ASSERT(m_share->state == NSS_ALTERED);
for (idx= 0; idx < num_of_keys; idx++)
{
KEY *key= key_info + idx;
@@ -4857,10 +4897,10 @@ int ha_ndbcluster::add_index(TABLE *table_arg,
if((error= create_index(key_info[idx].name, key, idx_type, idx)))
break;
}
- if (!error)
+ if (error)
{
- ndbcluster_get_share(m_share); // Increase ref_count
- set_ndb_share_state(m_share, NSS_ALTERED);
+ set_ndb_share_state(m_share, NSS_INITIAL);
+ free_share(&m_share); // Decrease ref_count
}
DBUG_RETURN(error);
}
@@ -4885,7 +4925,7 @@ int ha_ndbcluster::prepare_drop_index(TABLE *table_arg,
uint *key_num, uint num_of_keys)
{
DBUG_ENTER("ha_ndbcluster::prepare_drop_index");
- DBUG_ASSERT(m_share->state == NSS_INITIAL);
+ DBUG_ASSERT(m_share->state == NSS_ALTERED);
// Mark indexes for deletion
uint idx;
for (idx= 0; idx < num_of_keys; idx++)
@@ -4898,8 +4938,6 @@ int ha_ndbcluster::prepare_drop_index(TABLE *table_arg,
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
renumber_indexes(ndb, table_arg);
- ndbcluster_get_share(m_share); // Increase ref_count
- set_ndb_share_state(m_share, NSS_ALTERED);
DBUG_RETURN(0);
}
@@ -5004,7 +5042,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
is_old_table_tmpfile= 0;
String event_name(INJECTOR_EVENT_LEN);
ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
- ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share);
+ ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share,
+ "rename table");
}
if (!result && !IS_TMP_PREFIX(new_tabname))
@@ -5088,6 +5127,15 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
NDBDICT *dict= ndb->getDictionary();
#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
NDB_SHARE *share= get_share(path, 0, false);
#endif
@@ -5156,7 +5204,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0);
ndbcluster_handle_drop_table(ndb,
table_dropped ? event_name.c_ptr() : 0,
- share);
+ share, "delete table");
}
if (share)
@@ -5185,6 +5233,18 @@ int ha_ndbcluster::delete_table(const char *name)
set_dbname(name);
set_tabname(name);
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
+
if (check_ndb_connection())
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -5406,6 +5466,11 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
+#ifdef HAVE_NDB_BINLOG
+ if (!ndb_binlog_tables_inited && ndb_binlog_running)
+ table->db_stat|= HA_READ_ONLY;
+#endif
+
DBUG_RETURN(res);
}
@@ -5704,6 +5769,19 @@ int ndbcluster_drop_database_impl(const char *path)
static void ndbcluster_drop_database(char *path)
{
+ DBUG_ENTER("ndbcluster_drop_database");
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop database unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_VOID_RETURN;
+ //DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
ndbcluster_drop_database_impl(path);
#ifdef HAVE_NDB_BINLOG
char db[FN_REFLEN];
@@ -5712,6 +5790,7 @@ static void ndbcluster_drop_database(char *path)
current_thd->query, current_thd->query_length,
db, "", 0, 0, SOT_DROP_DB);
#endif
+ DBUG_VOID_RETURN;
}
/*
find all tables in ndb and discover those needed
@@ -5733,36 +5812,37 @@ int ndbcluster_find_all_files(THD *thd)
DBUG_ENTER("ndbcluster_find_all_files");
Ndb* ndb;
char key[FN_REFLEN];
- NdbDictionary::Dictionary::List list;
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
NDBDICT *dict= ndb->getDictionary();
- int unhandled, retries= 5;
+ int unhandled, retries= 5, skipped;
do
{
+ NdbDictionary::Dictionary::List list;
if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
unhandled= 0;
+ skipped= 0;
+ retries--;
for (uint i= 0 ; i < list.count ; i++)
{
NDBDICT::List::Element& elmt= list.elements[i];
- int do_handle_table= 0;
if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
{
DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name));
continue;
}
DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name));
- if (elmt.state == NDBOBJ::StateOnline ||
- elmt.state == NDBOBJ::StateBackup)
- do_handle_table= 1;
- else if (!(elmt.state == NDBOBJ::StateBuilding))
+ if (elmt.state != NDBOBJ::StateOnline &&
+ elmt.state != NDBOBJ::StateBackup &&
+ elmt.state != NDBOBJ::StateBuilding)
{
sql_print_information("NDB: skipping setup table %s.%s, in state %d",
elmt.database, elmt.name, elmt.state);
+ skipped++;
continue;
}
@@ -5771,7 +5851,7 @@ int ndbcluster_find_all_files(THD *thd)
if (!(ndbtab= dict->getTable(elmt.name)))
{
- if (do_handle_table)
+ if (retries == 0)
sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
elmt.database, elmt.name,
dict->getNdbError().code,
@@ -5840,9 +5920,9 @@ int ndbcluster_find_all_files(THD *thd)
pthread_mutex_unlock(&LOCK_open);
}
}
- while (unhandled && retries--);
+ while (unhandled && retries);
- DBUG_RETURN(0);
+ DBUG_RETURN(-(skipped + unhandled));
}
int ndbcluster_find_files(THD *thd,const char *db,const char *path,
@@ -7706,6 +7786,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
pthread_cond_wait(&COND_server_started, &LOCK_server_started);
pthread_mutex_unlock(&LOCK_server_started);
+ ndbcluster_util_inited= 1;
+
/*
Wait for cluster to start
*/
@@ -7737,6 +7819,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
}
#ifdef HAVE_NDB_BINLOG
+ if (ndb_extra_logging && ndb_binlog_running)
+ sql_print_information("NDB Binlog: Ndb tables initially read only.");
/* create tables needed by the replication */
ndbcluster_setup_binlog_table_shares(thd);
#else
@@ -7746,17 +7830,9 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndbcluster_find_all_files(thd);
#endif
- ndbcluster_util_inited= 1;
-
-#ifdef HAVE_NDB_BINLOG
- /* Signal injector thread that all is setup */
- pthread_cond_signal(&injector_cond);
-#endif
-
set_timespec(abstime, 0);
for (;!abort_loop;)
{
-
pthread_mutex_lock(&LOCK_ndb_util_thread);
pthread_cond_timedwait(&COND_ndb_util_thread,
&LOCK_ndb_util_thread,
@@ -7774,7 +7850,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
Check that the apply_status_share and schema_share has been created.
If not try to create it
*/
- if (!apply_status_share || !schema_share)
+ if (!ndb_binlog_tables_inited)
ndbcluster_setup_binlog_table_shares(thd);
#endif
@@ -10029,14 +10105,15 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
}
}
- dict->listObjects(dflist, NdbDictionary::Object::Undofile);
+ NdbDictionary::Dictionary::List uflist;
+ dict->listObjects(uflist, NdbDictionary::Object::Undofile);
ndberr= dict->getNdbError();
if (ndberr.classification != NdbError::NoError)
ERR_RETURN(ndberr);
- for (i= 0; i < dflist.count; i++)
+ for (i= 0; i < uflist.count; i++)
{
- NdbDictionary::Dictionary::List::Element& elt= dflist.elements[i];
+ NdbDictionary::Dictionary::List::Element& elt= uflist.elements[i];
Ndb_cluster_connection_node_iter iter;
unsigned id;
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 7dfec18fcb6..b375e30338f 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -595,6 +595,7 @@ class ha_ndbcluster: public handler
const char * table_type() const;
const char ** bas_ext() const;
ulong table_flags(void) const;
+ void prepare_for_alter();
int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);
int final_drop_index(TABLE *table_arg);
@@ -609,7 +610,7 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
- int create_handler_files(const char *file);
+ int create_handler_files(const char *file, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 60ccb661703..ec5b5858f5c 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -48,6 +48,7 @@ int ndb_binlog_thread_running= 0;
FALSE if not
*/
my_bool ndb_binlog_running= FALSE;
+my_bool ndb_binlog_tables_inited= FALSE;
/*
Global reference to the ndb injector thread THD oject
@@ -775,32 +776,50 @@ static int ndbcluster_create_schema_table(THD *thd)
DBUG_RETURN(0);
}
-void ndbcluster_setup_binlog_table_shares(THD *thd)
+int ndbcluster_setup_binlog_table_shares(THD *thd)
{
- int done_find_all_files= 0;
if (!schema_share &&
ndbcluster_check_schema_share() == 0)
{
- if (!done_find_all_files)
+ pthread_mutex_lock(&LOCK_open);
+ ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_SCHEMA_TABLE);
+ pthread_mutex_unlock(&LOCK_open);
+ if (!schema_share)
{
- ndbcluster_find_all_files(thd);
- done_find_all_files= 1;
+ ndbcluster_create_schema_table(thd);
+ // always make sure we create the 'schema' first
+ if (!schema_share)
+ return 1;
}
- ndbcluster_create_schema_table(thd);
- // always make sure we create the 'schema' first
- if (!schema_share)
- return;
}
if (!apply_status_share &&
ndbcluster_check_apply_status_share() == 0)
{
- if (!done_find_all_files)
+ pthread_mutex_lock(&LOCK_open);
+ ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_APPLY_TABLE);
+ pthread_mutex_unlock(&LOCK_open);
+ if (!apply_status_share)
{
- ndbcluster_find_all_files(thd);
- done_find_all_files= 1;
+ ndbcluster_create_apply_status_table(thd);
+ if (!apply_status_share)
+ return 1;
}
- ndbcluster_create_apply_status_table(thd);
}
+ if (!ndbcluster_find_all_files(thd))
+ {
+ pthread_mutex_lock(&LOCK_open);
+ ndb_binlog_tables_inited= TRUE;
+ if (ndb_binlog_running)
+ {
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: ndb tables writable");
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ /* Signal injector thread that all is setup */
+ pthread_cond_signal(&injector_cond);
+ }
+ return 0;
}
/*
@@ -936,6 +955,31 @@ static char *ndb_pack_varchar(const NDBCOL *col, char *buf,
/*
log query in schema table
*/
+static void ndb_report_waiting(const char *key,
+ int the_time,
+ const char *op,
+ const char *obj)
+{
+ ulonglong ndb_latest_epoch= 0;
+ const char *proc_info= "<no info>";
+ pthread_mutex_lock(&injector_mutex);
+ if (injector_ndb)
+ ndb_latest_epoch= injector_ndb->getLatestGCI();
+ if (injector_thd)
+ proc_info= injector_thd->proc_info;
+ pthread_mutex_unlock(&injector_mutex);
+ sql_print_information("NDB %s:"
+ " waiting max %u sec for %s %s."
+ " epochs: (%u,%u,%u)"
+ " injector proc_info: %s"
+ ,key, the_time, op, obj
+ ,(uint)ndb_latest_handled_binlog_epoch
+ ,(uint)ndb_latest_received_binlog_epoch
+ ,(uint)ndb_latest_epoch
+ ,proc_info
+ );
+}
+
int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
const char *query, int query_length,
const char *db, const char *table_name,
@@ -965,6 +1009,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
}
char tmp_buf2[FN_REFLEN];
+ const char *type_str;
switch (type)
{
case SOT_DROP_TABLE:
@@ -975,6 +1020,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
query= tmp_buf2;
query_length= (uint) (strxmov(tmp_buf2, "drop table `",
table_name, "`", NullS) - tmp_buf2);
+ type_str= "drop table";
break;
case SOT_RENAME_TABLE:
/* redo the rename table query as is may contain several tables */
@@ -982,20 +1028,28 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
query_length= (uint) (strxmov(tmp_buf2, "rename table `",
old_db, ".", old_table_name, "` to `",
db, ".", table_name, "`", NullS) - tmp_buf2);
+ type_str= "rename table";
break;
case SOT_CREATE_TABLE:
- // fall through
+ type_str= "create table";
+ break;
case SOT_ALTER_TABLE:
+    type_str= "alter table";
break;
case SOT_DROP_DB:
+ type_str= "drop db";
break;
case SOT_CREATE_DB:
+ type_str= "create db";
break;
case SOT_ALTER_DB:
+ type_str= "alter db";
break;
case SOT_TABLESPACE:
+ type_str= "tablespace";
break;
case SOT_LOGFILE_GROUP:
+ type_str= "logfile group";
break;
default:
abort(); /* should not happen, programming error */
@@ -1174,9 +1228,9 @@ end:
struct timespec abstime;
int i;
set_timespec(abstime, 1);
- (void) pthread_cond_timedwait(&injector_cond,
- &ndb_schema_object->mutex,
- &abstime);
+ int ret= pthread_cond_timedwait(&injector_cond,
+ &ndb_schema_object->mutex,
+ &abstime);
(void) pthread_mutex_lock(&schema_share->mutex);
for (i= 0; i < ndb_number_of_storage_nodes; i++)
@@ -1198,16 +1252,19 @@ end:
if (bitmap_is_clear_all(&ndb_schema_object->slock_bitmap))
break;
- max_timeout--;
- if (max_timeout == 0)
+ if (ret)
{
- sql_print_error("NDB create table: timed out. Ignoring...");
- break;
+ max_timeout--;
+ if (max_timeout == 0)
+ {
+ sql_print_error("NDB %s: distributing %s timed out. Ignoring...",
+ type_str, ndb_schema_object->key);
+ break;
+ }
+ if (ndb_extra_logging)
+ ndb_report_waiting(type_str, max_timeout,
+ "distributing", ndb_schema_object->key);
}
- if (ndb_extra_logging)
- sql_print_information("NDB create table: "
- "waiting max %u sec for create table %s.",
- max_timeout, ndb_schema_object->key);
}
(void) pthread_mutex_unlock(&ndb_schema_object->mutex);
}
@@ -1373,6 +1430,10 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
NDB_SHARE *share)
{
DBUG_ENTER("ndb_handle_schema_change");
+ TABLE* table= share->table;
+ TABLE_SHARE *table_share= table->s;
+ const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
bool do_close_cached_tables= FALSE;
bool is_online_alter_table= FALSE;
bool is_rename_table= FALSE;
@@ -1392,70 +1453,68 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
}
}
- if (is_remote_change) /* includes CLUSTER_FAILURE */
+ /*
+ Refresh local dictionary cache by
+ invalidating table and all it's indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ (void)strxmov(table_handler.m_dbname, dbname, NullS);
+ (void)strxmov(table_handler.m_tabname, tabname, NullS);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE);
+ thd_ndb->ndb= old_ndb;
+
+ /*
+ Refresh local frm file and dictionary cache if
+ remote on-line alter table
+ */
+ if (is_remote_change && is_online_alter_table)
{
- TABLE* table= share->table;
- TABLE_SHARE *table_share= table->s;
- const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
+ char key[FN_REFLEN];
+ const void *data= 0, *pack_data= 0;
+ uint length, pack_length;
+ int error;
+ NDBDICT *dict= ndb->getDictionary();
+ const NDBTAB *altered_table= pOp->getTable();
- /*
- Invalidate table and all it's indexes
+ DBUG_PRINT("info", ("Detected frm change of table %s.%s",
+ dbname, tabname));
+ build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
+ /*
+ If the frm of the altered table is different than the one on
+ disk then overwrite it with the new table definition
*/
- ndb->setDatabaseName(dbname);
- Thd_ndb *thd_ndb= get_thd_ndb(thd);
- DBUG_ASSERT(thd_ndb != NULL);
- Ndb* old_ndb= thd_ndb->ndb;
- thd_ndb->ndb= ndb;
- ha_ndbcluster table_handler(table_share);
- table_handler.set_dbname(share->key);
- table_handler.set_tabname(share->key);
- table_handler.open_indexes(ndb, table, TRUE);
- table_handler.invalidate_dictionary_cache(TRUE);
- thd_ndb->ndb= old_ndb;
-
- if (is_online_alter_table)
- {
- const char *tabname= table_share->table_name.str;
- char key[FN_REFLEN];
- const void *data= 0, *pack_data= 0;
- uint length, pack_length;
- int error;
- NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *altered_table= pOp->getTable();
-
- DBUG_PRINT("info", ("Detected frm change of table %s.%s",
- dbname, tabname));
- build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
- /*
- If the frm of the altered table is different than the one on
- disk then overwrite it with the new table definition
- */
- if (readfrm(key, &data, &length) == 0 &&
- packfrm(data, length, &pack_data, &pack_length) == 0 &&
- cmp_frm(altered_table, pack_data, pack_length))
+ if (readfrm(key, &data, &length) == 0 &&
+ packfrm(data, length, &pack_data, &pack_length) == 0 &&
+ cmp_frm(altered_table, pack_data, pack_length))
+ {
+ DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
+ altered_table->getFrmLength());
+ pthread_mutex_lock(&LOCK_open);
+ const NDBTAB *old= dict->getTable(tabname);
+      if (!old ||
+          old->getObjectVersion() != altered_table->getObjectVersion())
+ dict->putTable(altered_table);
+
+ if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
+ (error= writefrm(key, data, length)))
{
- DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
- altered_table->getFrmLength());
- pthread_mutex_lock(&LOCK_open);
- const NDBTAB *old= dict->getTable(tabname);
- if (!old &&
- old->getObjectVersion() != altered_table->getObjectVersion())
- dict->putTable(altered_table);
-
- if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
- (error= writefrm(key, data, length)))
- {
- sql_print_information("NDB: Failed write frm for %s.%s, error %d",
- dbname, tabname, error);
- }
- ndbcluster_binlog_close_table(thd, share);
- close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
- if ((error= ndbcluster_binlog_open_table(thd, share,
- table_share, table)))
- sql_print_information("NDB: Failed to re-open table %s.%s",
- dbname, tabname);
- pthread_mutex_unlock(&LOCK_open);
+ sql_print_information("NDB: Failed write frm for %s.%s, error %d",
+ dbname, tabname, error);
}
+ ndbcluster_binlog_close_table(thd, share);
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
+ if ((error= ndbcluster_binlog_open_table(thd, share,
+ table_share, table)))
+ sql_print_information("NDB: Failed to re-open table %s.%s",
+ dbname, tabname);
+ pthread_mutex_unlock(&LOCK_open);
}
}
@@ -1483,6 +1542,21 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
share->table->s->db.length= strlen(share->db);
share->table->s->table_name.str= share->table_name;
share->table->s->table_name.length= strlen(share->table_name);
+ /*
+ Refresh local dictionary cache by invalidating any
+ old table with same name and all it's indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ table_handler.set_dbname(share->key);
+ table_handler.set_tabname(share->key);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE);
+ thd_ndb->ndb= old_ndb;
}
DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
if (share->op_old == pOp)
@@ -1509,9 +1583,9 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
share= 0;
pOp->setCustomData(0);
-
+
pthread_mutex_lock(&injector_mutex);
- injector_ndb->dropEventOperation(pOp);
+ ndb->dropEventOperation(pOp);
pOp= 0;
pthread_mutex_unlock(&injector_mutex);
@@ -1689,9 +1763,15 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
// skip
break;
case NDBEVENT::TE_CLUSTER_FAILURE:
+ // fall through
case NDBEVENT::TE_DROP:
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
free_share(&schema_share);
schema_share= 0;
+ ndb_binlog_tables_inited= FALSE;
// fall through
case NDBEVENT::TE_ALTER:
ndb_handle_schema_change(thd, ndb, pOp, tmp_share);
@@ -2385,7 +2465,6 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
if (!op)
{
- pthread_mutex_unlock(&injector_mutex);
sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
" %s",event_name);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
@@ -2393,6 +2472,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
ndb->getNdbError().code,
ndb->getNdbError().message,
"NDB");
+ pthread_mutex_unlock(&injector_mutex);
DBUG_RETURN(-1);
}
@@ -2494,9 +2574,15 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
get_share(share);
if (do_apply_status_share)
+ {
apply_status_share= get_share(share);
+ (void) pthread_cond_signal(&injector_cond);
+ }
else if (do_schema_share)
+ {
schema_share= get_share(share);
+ (void) pthread_cond_signal(&injector_cond);
+ }
DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
share->key, share->op, share->use_count));
@@ -2513,7 +2599,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
*/
int
ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
- NDB_SHARE *share)
+ NDB_SHARE *share, const char *type_str)
{
DBUG_ENTER("ndbcluster_handle_drop_table");
@@ -2569,21 +2655,24 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
{
struct timespec abstime;
set_timespec(abstime, 1);
- (void) pthread_cond_timedwait(&injector_cond,
- &share->mutex,
- &abstime);
- max_timeout--;
+ int ret= pthread_cond_timedwait(&injector_cond,
+ &share->mutex,
+ &abstime);
if (share->op == 0)
break;
- if (max_timeout == 0)
+ if (ret)
{
- sql_print_error("NDB delete table: timed out. Ignoring...");
- break;
+ max_timeout--;
+ if (max_timeout == 0)
+ {
+ sql_print_error("NDB %s: %s timed out. Ignoring...",
+ type_str, share->key);
+ break;
+ }
+ if (ndb_extra_logging)
+ ndb_report_waiting(type_str, max_timeout,
+ type_str, share->key);
}
- if (ndb_extra_logging)
- sql_print_information("NDB delete table: "
- "waiting max %u sec for drop table %s.",
- max_timeout, share->key);
}
(void) pthread_mutex_unlock(&share->mutex);
#else
@@ -2646,7 +2735,8 @@ static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
}
static int
-ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
+ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
+ NdbEventOperation *pOp,
Binlog_index_row &row)
{
NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
@@ -2655,18 +2745,23 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
/* make sure to flush any pending events as they can be dependent
on one of the tables being changed below
*/
- injector_thd->binlog_flush_pending_rows_event(true);
+ thd->binlog_flush_pending_rows_event(true);
switch (type)
{
case NDBEVENT::TE_CLUSTER_FAILURE:
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: cluster failure for %s.", share->key);
if (apply_status_share == share)
{
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
+ ndb_binlog_tables_inited= FALSE;
}
- if (ndb_extra_logging)
- sql_print_information("NDB Binlog: cluster failure for %s.", share->key);
DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: %lx share op: %lx "
"op_old: %lx",
@@ -2675,8 +2770,13 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
case NDBEVENT::TE_DROP:
if (apply_status_share == share)
{
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
+ ndb_binlog_tables_inited= FALSE;
}
/* ToDo: remove printout */
if (ndb_extra_logging)
@@ -2702,7 +2802,7 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
return 0;
}
- ndb_handle_schema_change(injector_thd, ndb, pOp, share);
+ ndb_handle_schema_change(thd, ndb, pOp, share);
return 0;
}
@@ -2982,7 +3082,8 @@ static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
pthread_handler_t ndb_binlog_thread_func(void *arg)
{
THD *thd; /* needs to be first for thread_stack */
- Ndb *ndb= 0;
+ Ndb *i_ndb= 0;
+ Ndb *s_ndb= 0;
Thd_ndb *thd_ndb=0;
int ndb_update_binlog_index= 1;
injector *inj= injector::instance();
@@ -3034,16 +3135,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_unlock(&LOCK_thread_count);
thd->lex->start_transaction_opt= 0;
- if (!(schema_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
- schema_ndb->init())
+ if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
+ s_ndb->init())
{
sql_print_error("NDB Binlog: Getting Schema Ndb object failed");
goto err;
}
// empty database
- if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) ||
- ndb->init())
+ if (!(i_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
+ i_ndb->init())
{
sql_print_error("NDB Binlog: Getting Ndb object failed");
ndb_binlog_thread_running= -1;
@@ -3064,7 +3165,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_lock(&injector_mutex);
*/
injector_thd= thd;
- injector_ndb= ndb;
+ injector_ndb= i_ndb;
+ schema_ndb= s_ndb;
ndb_binlog_thread_running= 1;
if (opt_bin_log)
{
@@ -3087,7 +3189,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->proc_info= "Waiting for ndbcluster to start";
pthread_mutex_lock(&injector_mutex);
- while (!ndbcluster_util_inited)
+ while (!schema_share || !apply_status_share)
{
/* ndb not connected yet */
struct timespec abstime;
@@ -3119,10 +3221,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->db= db;
if (ndb_binlog_running)
open_binlog_index(thd, &binlog_tables, &binlog_index);
- if (!apply_status_share)
- {
- sql_print_error("NDB: Could not get apply status share");
- }
thd->db= db;
}
@@ -3150,14 +3248,14 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
int res= 0, tot_poll_wait= 1000;
if (ndb_binlog_running)
{
- res= ndb->pollEvents(tot_poll_wait, &gci);
+ res= i_ndb->pollEvents(tot_poll_wait, &gci);
tot_poll_wait= 0;
}
- int schema_res= schema_ndb->pollEvents(tot_poll_wait, &schema_gci);
+ int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci);
ndb_latest_received_binlog_epoch= gci;
while (gci > schema_gci && schema_res >= 0)
- schema_res= schema_ndb->pollEvents(10, &schema_gci);
+ schema_res= s_ndb->pollEvents(10, &schema_gci);
if ((abort_loop || do_ndbcluster_binlog_close_connection) &&
(ndb_latest_handled_binlog_epoch >= g_latest_trans_gci ||
@@ -3184,15 +3282,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (unlikely(schema_res > 0))
{
- schema_ndb->
+ thd->proc_info= "Processing events from schema table";
+ s_ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
- schema_ndb->
+ s_ndb->
setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
- NdbEventOperation *pOp= schema_ndb->nextEvent();
+ NdbEventOperation *pOp= s_ndb->nextEvent();
while (pOp != NULL)
{
if (!pOp->hasError())
- ndb_binlog_thread_handle_schema_event(thd, schema_ndb, pOp,
+ ndb_binlog_thread_handle_schema_event(thd, s_ndb, pOp,
&post_epoch_log_list,
&post_epoch_unlock_list,
&mem_root);
@@ -3201,7 +3300,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
"binlog schema event",
(ulong) pOp->getNdbError().code,
pOp->getNdbError().message);
- pOp= schema_ndb->nextEvent();
+ pOp= s_ndb->nextEvent();
}
}
@@ -3213,7 +3312,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
int event_count= 0;
#endif
thd->proc_info= "Processing events";
- NdbEventOperation *pOp= ndb->nextEvent();
+ NdbEventOperation *pOp= i_ndb->nextEvent();
Binlog_index_row row;
while (pOp != NULL)
{
@@ -3224,9 +3323,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName()));
DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch);
- ndb->
+ i_ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
- ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
+ i_ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
bzero((char*) &row, sizeof(row));
injector::transaction trans;
@@ -3235,7 +3334,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
- while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
+ while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
@@ -3321,7 +3420,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
event_count++;
#endif
if (pOp->hasError() &&
- ndb_binlog_thread_handle_error(ndb, pOp, row) < 0)
+ ndb_binlog_thread_handle_error(i_ndb, pOp, row) < 0)
goto err;
#ifndef DBUG_OFF
@@ -3341,7 +3440,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
- while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
+ while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
if (gci_op == pOp)
@@ -3353,19 +3452,19 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
#endif
if ((unsigned) pOp->getEventType() <
(unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
- ndb_binlog_thread_handle_data_event(ndb, pOp, row, trans);
+ ndb_binlog_thread_handle_data_event(i_ndb, pOp, row, trans);
else
{
// set injector_ndb database/schema from table internal name
int ret=
- ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
+ i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
DBUG_ASSERT(ret == 0);
- ndb_binlog_thread_handle_non_data_event(ndb, pOp, row);
+ ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
// reset to catch errors
- ndb->setDatabaseName("");
+ i_ndb->setDatabaseName("");
}
- pOp= ndb->nextEvent();
+ pOp= i_ndb->nextEvent();
} while (pOp && pOp->getGCI() == gci);
/*
@@ -3379,6 +3478,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (trans.good())
{
//DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes);
+ thd->proc_info= "Committing events to binlog";
injector::transaction::binlog_pos start= trans.start_pos();
if (int r= trans.commit())
{
@@ -3418,10 +3518,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
}
err:
DBUG_PRINT("info",("Shutting down cluster binlog thread"));
+ thd->proc_info= "Shutting down";
close_thread_tables(thd);
pthread_mutex_lock(&injector_mutex);
/* don't mess with the injector_ndb anymore from other threads */
+ injector_thd= 0;
injector_ndb= 0;
+ schema_ndb= 0;
pthread_mutex_unlock(&injector_mutex);
thd->db= 0; // as not to try to free memory
sql_print_information("Stopping Cluster Binlog");
@@ -3438,21 +3541,45 @@ err:
}
/* remove all event operations */
- if (ndb)
+ if (s_ndb)
{
NdbEventOperation *op;
DBUG_PRINT("info",("removing all event operations"));
- while ((op= ndb->getEventOperation()))
+ while ((op= s_ndb->getEventOperation()))
{
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
DBUG_PRINT("info",("removing event operation on %s",
op->getEvent()->getName()));
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
+ DBUG_ASSERT(share != 0);
+ DBUG_ASSERT(share->op == op ||
+ share->op_old == op);
+ share->op= share->op_old= 0;
free_share(&share);
- ndb->dropEventOperation(op);
+ s_ndb->dropEventOperation(op);
+ }
+ delete s_ndb;
+ s_ndb= 0;
+ }
+ if (i_ndb)
+ {
+ NdbEventOperation *op;
+ DBUG_PRINT("info",("removing all event operations"));
+ while ((op= i_ndb->getEventOperation()))
+ {
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
+ DBUG_PRINT("info",("removing event operation on %s",
+ op->getEvent()->getName()));
+ NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
+ DBUG_ASSERT(share != 0);
+ DBUG_ASSERT(share->op == op ||
+ share->op_old == op);
+ share->op= share->op_old= 0;
+ free_share(&share);
+ i_ndb->dropEventOperation(op);
}
- delete ndb;
- ndb= 0;
+ delete i_ndb;
+ i_ndb= 0;
}
hash_free(&ndb_schema_objects);
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
index fda025842a0..9d15016568b 100644
--- a/sql/ha_ndbcluster_binlog.h
+++ b/sql/ha_ndbcluster_binlog.h
@@ -101,7 +101,8 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
const char *old_db= 0,
const char *old_table_name= 0);
int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
- NDB_SHARE *share);
+ NDB_SHARE *share,
+ const char *type_str);
void ndb_rep_event_name(String *event_name,
const char *db, const char *tbl);
int ndb_create_table_from_engine(THD *thd, const char *db,
@@ -112,12 +113,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg);
/*
table cluster_replication.apply_status
*/
-void ndbcluster_setup_binlog_table_shares(THD *thd);
+int ndbcluster_setup_binlog_table_shares(THD *thd);
extern NDB_SHARE *apply_status_share;
extern NDB_SHARE *schema_share;
extern THD *injector_thd;
extern my_bool ndb_binlog_running;
+extern my_bool ndb_binlog_tables_inited;
bool
ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 7cf841a5d71..3ee9a2954eb 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -562,6 +562,7 @@ int ha_partition::rename_table(const char *from, const char *to)
SYNOPSIS
create_handler_files()
name Full path of table name
+ create_info Create info generated for CREATE TABLE
RETURN VALUE
>0 Error
@@ -575,7 +576,8 @@ int ha_partition::rename_table(const char *from, const char *to)
and types of engines in the partitions.
*/
-int ha_partition::create_handler_files(const char *name)
+int ha_partition::create_handler_files(const char *name,
+ HA_CREATE_INFO *create_info)
{
DBUG_ENTER("ha_partition::create_handler_files()");
@@ -1135,7 +1137,6 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
partition_element *part_elem= part_it++;
if (all_parts || part_elem->part_state == PART_CHANGED)
{
- handler *file;
if (m_is_sub_partitioned)
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -2311,7 +2312,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
err_handler:
while (file-- != m_file)
(*file)->close();
-err:
+
DBUG_RETURN(error);
}
@@ -2915,7 +2916,6 @@ int ha_partition::rnd_init(bool scan)
int error;
uint i= 0;
uint32 part_id;
- handler **file;
DBUG_ENTER("ha_partition::rnd_init");
include_partition_fields_in_used_fields();
@@ -4201,11 +4201,7 @@ void ha_partition::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
- /*
- The auto increment value is only maintained by the first handler
- so we will only call this.
- */
- m_file[0]->info(HA_STATUS_AUTO);
+ auto_increment_value= get_auto_increment();
}
if (flag & HA_STATUS_VARIABLE)
{
@@ -5349,9 +5345,15 @@ void ha_partition::restore_auto_increment()
ulonglong ha_partition::get_auto_increment()
{
+ ulonglong auto_inc, max_auto_inc= 0;
DBUG_ENTER("ha_partition::get_auto_increment");
- DBUG_RETURN(m_file[0]->get_auto_increment());
+ for (uint i= 0; i < m_tot_parts; i++)
+ {
+ auto_inc= m_file[i]->get_auto_increment();
+ set_if_bigger(max_auto_inc, auto_inc);
+ }
+ DBUG_RETURN(max_auto_inc);
}
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 96b615df71a..ecaa7e1e8fa 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -179,7 +179,8 @@ public:
virtual int rename_table(const char *from, const char *to);
virtual int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info);
- virtual int create_handler_files(const char *name);
+ virtual int create_handler_files(const char *name,
+ HA_CREATE_INFO *create_info);
virtual void update_create_info(HA_CREATE_INFO *create_info);
virtual char *update_table_comment(const char *comment);
virtual int change_partitions(HA_CREATE_INFO *create_info,
diff --git a/sql/handler.h b/sql/handler.h
index 261a813bbfa..e93fdfe67e3 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1338,6 +1338,7 @@ public:
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
+ virtual void prepare_for_alter() { return; }
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
{ return (HA_ERR_WRONG_COMMAND); }
virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
@@ -1378,7 +1379,8 @@ public:
virtual void drop_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
- virtual int create_handler_files(const char *name) { return FALSE;}
+ virtual int create_handler_files(const char *name, HA_CREATE_INFO *info)
+ { return FALSE;}
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
diff --git a/sql/hostname.cc b/sql/hostname.cc
index c5c337080cf..52c4107372f 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -26,7 +26,7 @@
#ifdef __cplusplus
extern "C" { // Because of SCO 3.2V4.2
#endif
-#if !defined( __WIN__) && !defined(OS2)
+#if !defined( __WIN__)
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 65654b150e1..acee912c912 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -52,11 +52,10 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
{
uint i;
Field *field= NULL;
- bool all_constant= TRUE;
/* If the first argument is a FIELD_ITEM, pull out the field. */
- if (items[0]->type() == Item::FIELD_ITEM)
- field=((Item_field *)items[0])->field;
+ if (items[0]->real_item()->type() == Item::FIELD_ITEM)
+ field=((Item_field *)(items[0]->real_item()))->field;
/* But if it can't be compared as a longlong, we don't really care. */
if (field && !field->can_be_compared_as_longlong())
field= NULL;
@@ -65,16 +64,9 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
for (i= 1; i < nitems; i++)
{
type[0]= item_cmp_type(type[0], items[i]->result_type());
- if (field && !convert_constant_item(thd, field, &items[i]))
- all_constant= FALSE;
+ if (field && convert_constant_item(thd, field, &items[i]))
+ type[0]= INT_RESULT;
}
-
- /*
- If we had a field that can be compared as a longlong, and all constant
- items, then the aggregate result will be an INT_RESULT.
- */
- if (field && all_constant)
- type[0]= INT_RESULT;
}
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 75c3f8a2922..f5c8d511025 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -26,7 +26,7 @@
*/
Item_row::Item_row(List<Item> &arg):
- Item(), used_tables_cache(0), array_holder(1), const_item_cache(1), with_null(0)
+ Item(), used_tables_cache(0), const_item_cache(1), with_null(0)
{
//TODO: think placing 2-3 component items in item (as it done for function)
@@ -85,6 +85,20 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
}
+void Item_row::cleanup()
+{
+ DBUG_ENTER("Item_row::cleanup");
+
+ Item::cleanup();
+ /* Reset to the original values */
+ used_tables_cache= 0;
+ const_item_cache= 1;
+ with_null= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
diff --git a/sql/item_row.h b/sql/item_row.h
index 6fbe7436b72..d6dd4371372 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -19,7 +19,6 @@ class Item_row: public Item
Item **items;
table_map used_tables_cache;
uint arg_count;
- bool array_holder;
bool const_item_cache;
bool with_null;
public:
@@ -29,7 +28,6 @@ public:
items(item->items),
used_tables_cache(item->used_tables_cache),
arg_count(item->arg_count),
- array_holder(0),
const_item_cache(item->const_item_cache),
with_null(0)
{}
@@ -62,6 +60,7 @@ public:
return 0;
};
bool fix_fields(THD *thd, Item **ref);
+ void cleanup();
void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields);
table_map used_tables() const { return used_tables_cache; };
bool const_item() const { return const_item_cache; };
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 99cb7078eba..6fbd6db1a89 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -2002,6 +2002,41 @@ longlong Item_date_add_interval::val_int()
((date*100L + ltime.hour)*100L+ ltime.minute)*100L + ltime.second;
}
+
+
+bool Item_date_add_interval::eq(const Item *item, bool binary_cmp) const
+{
+ INTERVAL interval, other_interval;
+ String val= value; // Because of const
+
+ if (this == item)
+ return TRUE;
+
+ if ((item->type() != FUNC_ITEM) ||
+ (arg_count != ((Item_func*) item)->arg_count) ||
+ (func_name() != ((Item_func*) item)->func_name()))
+ return FALSE;
+
+ Item_date_add_interval *other= (Item_date_add_interval*) item;
+
+ if ((int_type != other->int_type) ||
+ (!args[0]->eq(other->args[0], binary_cmp)) ||
+ (get_interval_value(args[1], int_type, &val, &interval)))
+ return FALSE;
+
+ val= other->value;
+
+ if ((get_interval_value(other->args[1], other->int_type, &val,
+ &other_interval)) ||
+ ((date_sub_interval ^ interval.neg) ^
+ (other->date_sub_interval ^ other_interval.neg)))
+ return FALSE;
+
+ // Assume comparing same types here due to earlier check
+ return memcmp(&interval, &other_interval, sizeof(INTERVAL)) == 0;
+}
+
+
static const char *interval_names[]=
{
"year", "quarter", "month", "day", "hour",
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index ffe049873fc..a42232fd0fc 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -667,6 +667,7 @@ public:
double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); }
longlong val_int();
bool get_date(TIME *res, uint fuzzy_date);
+ bool eq(const Item *item, bool binary_cmp) const;
void print(String *str);
virtual bool check_partition_func_processor(byte *bool_arg) { return 0;}
};
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 91f958d5b70..a245e3b1b33 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -1133,6 +1133,13 @@ static Item *create_func_number(MY_XPATH *xpath, Item **args, uint nargs)
}
+static Item *create_func_string_length(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ Item *arg= nargs ? args[0] : xpath->context;
+ return arg ? new Item_func_char_length(arg) : 0;
+}
+
+
static Item *create_func_round(MY_XPATH *xpath, Item **args, uint nargs)
{
return new Item_func_round(args[0], new Item_int((char*)"0",0,1),0);
@@ -1246,6 +1253,7 @@ static MY_XPATH_FUNC my_func_names[] =
{"local-name" , 10 , 0 , 1 , 0},
{"starts-with" , 11 , 2 , 2 , 0},
{"namespace-uri" , 13 , 0 , 1 , 0},
+ {"string-length" , 13 , 0 , 1 , create_func_string_length},
{"substring-after" , 15 , 2 , 2 , 0},
{"normalize-space" , 15 , 0 , 1 , 0},
{"substring-before" , 16 , 2 , 2 , 0},
@@ -1304,30 +1312,6 @@ my_xpath_init(MY_XPATH *xpath)
}
-/*
- Some ctype-alike helper functions. Note, we cannot
- reuse cs->ident_map[], because in Xpath, unlike in SQL,
- dash character is a valid identifier part.
-*/
-static int
-my_xident_beg(int c)
-{
- return (((c) >= 'a' && (c) <= 'z') ||
- ((c) >= 'A' && (c) <= 'Z') ||
- ((c) == '_'));
-}
-
-
-static int
-my_xident_body(int c)
-{
- return (((c) >= 'a' && (c) <= 'z') ||
- ((c) >= 'A' && (c) <= 'Z') ||
- ((c) >= '0' && (c) <= '9') ||
- ((c)=='-') || ((c) == '_'));
-}
-
-
static int
my_xdigit(int c)
{
@@ -1350,7 +1334,7 @@ static void
my_xpath_lex_scan(MY_XPATH *xpath,
MY_XPATH_LEX *lex, const char *beg, const char *end)
{
- int ch;
+ int ch, ctype, length;
for ( ; beg < end && *beg == ' ' ; beg++); // skip leading spaces
lex->beg= beg;
@@ -1360,20 +1344,20 @@ my_xpath_lex_scan(MY_XPATH *xpath,
lex->term= MY_XPATH_LEX_EOF; // end of line reached
return;
}
- ch= *beg++;
-
- if (ch > 0 && ch < 128 && simpletok[ch])
- {
- // a token consisting of one character found
- lex->end= beg;
- lex->term= ch;
- return;
- }
-
- if (my_xident_beg(ch)) // ident, or a function call, or a keyword
+
+ // Check ident, or a function call, or a keyword
+ if ((length= xpath->cs->cset->ctype(xpath->cs, &ctype,
+ (const uchar*) beg,
+ (const uchar*) end)) > 0 &&
+ ((ctype & (_MY_L | _MY_U)) || *beg == '_'))
{
- // scan until the end of the identifier
- for ( ; beg < end && my_xident_body(*beg); beg++);
+      // scan until the end of the identifier
+ for (beg+= length;
+ (length= xpath->cs->cset->ctype(xpath->cs, &ctype,
+ (const uchar*) beg,
+ (const uchar*) end)) > 0 &&
+ ((ctype & (_MY_L | _MY_U | _MY_NMR)) || *beg == '_' || *beg == '-') ;
+ beg+= length) /* no op */;
lex->end= beg;
// check if a function call
@@ -1388,6 +1372,18 @@ my_xpath_lex_scan(MY_XPATH *xpath,
return;
}
+
+ ch= *beg++;
+
+ if (ch > 0 && ch < 128 && simpletok[ch])
+ {
+ // a token consisting of one character found
+ lex->end= beg;
+ lex->term= ch;
+ return;
+ }
+
+
if (my_xdigit(ch)) // a sequence of digits
{
for ( ; beg < end && my_xdigit(*beg) ; beg++);
@@ -1849,7 +1845,11 @@ static int my_xpath_parse_FunctionCall(MY_XPATH *xpath)
for (nargs= 0 ; nargs < func->maxargs; )
{
if (!my_xpath_parse_Expr(xpath))
- return 0;
+ {
+ if (nargs < func->minargs)
+ return 0;
+ goto right_paren;
+ }
args[nargs++]= xpath->item;
if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_COMMA))
{
@@ -1859,6 +1859,8 @@ static int my_xpath_parse_FunctionCall(MY_XPATH *xpath)
break;
}
}
+
+right_paren:
if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_RP))
return 0;
diff --git a/sql/key.cc b/sql/key.cc
index fd9e8a89b17..a407fff4840 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -192,7 +192,8 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
Field_bit *field= (Field_bit *) (key_part->field);
if (field->bit_len)
{
- uchar bits= *(from_key + key_part->length - field->field_length -1);
+ uchar bits= *(from_key + key_part->length -
+ field->pack_length_in_rec() - 1);
set_rec_bits(bits, to_record + key_part->null_offset +
(key_part->null_bit == 128),
field->bit_ofs, field->bit_len);
diff --git a/sql/lex.h b/sql/lex.h
index 9f0c6c4f8b7..d22a56034c5 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -390,9 +390,11 @@ static SYMBOL symbols[] = {
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
{ "PARSER", SYM(PARSER_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
+#ifdef WITH_PARTITION_STORAGE_ENGINE
{ "PARTITION", SYM(PARTITION_SYM)},
{ "PARTITIONING", SYM(PARTITIONING_SYM)},
{ "PARTITIONS", SYM(PARTITIONS_SYM)},
+#endif
{ "PASSWORD", SYM(PASSWORD)},
{ "PHASE", SYM(PHASE_SYM)},
{ "PLUGIN", SYM(PLUGIN_SYM)},
diff --git a/sql/log.cc b/sql/log.cc
index b93b5319ffb..818abe89e02 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -18,10 +18,6 @@
/* logging of commands */
/* TODO: Abort logging when we get an error in reading or writing log files */
-#ifdef __EMX__
-#include <io.h>
-#endif
-
#include "mysql_priv.h"
#include "sql_repl.h"
#include "rpl_filter.h"
@@ -680,20 +676,36 @@ bool LOGGER::flush_logs(THD *thd)
/* reopen log files */
file_log_handler->flush();
- /*
- this will lock and wait for all but the logger thread to release the
- tables. Then we could reopen log tables. Then release the name locks.
- */
- lock_and_wait_for_table_name(thd, &close_slow_log);
- lock_and_wait_for_table_name(thd, &close_general_log);
+ /* flush tables, in the case they are enabled */
+ if (logger.is_log_tables_initialized)
+ {
+ /*
+ This will lock and wait for all but the logger thread to release the
+ tables. Then we could reopen log tables. Then release the name locks.
+
+ NOTE: in fact, the first parameter used in lock_and_wait_for_table_name()
+ and table_log_handler->flush() could be any non-NULL THD, as the
+ underlying code makes certain assumptions about this.
+ Here we use one of the logger handler THD's. Simply because it
+ seems appropriate.
+ */
+ lock_and_wait_for_table_name(table_log_handler->general_log_thd,
+ &close_slow_log);
+ lock_and_wait_for_table_name(table_log_handler->general_log_thd,
+ &close_general_log);
- /* deny others from logging to general and slow log, while reopening tables */
- logger.lock();
+ /*
+ Deny others from logging to general and slow log,
+ while reopening tables.
+ */
+ logger.lock();
- table_log_handler->flush(thd, &close_slow_log, &close_general_log);
+ table_log_handler->flush(table_log_handler->general_log_thd,
+ &close_slow_log, &close_general_log);
- /* end of log tables flush */
- logger.unlock();
+ /* end of log tables flush */
+ logger.unlock();
+ }
return FALSE;
}
@@ -3122,7 +3134,11 @@ DBUG_skip_commit:
rotate binlog, if necessary.
*/
if (commit_event->get_type_code() == XID_EVENT)
- thread_safe_increment(prepared_xids, &LOCK_prep_xids);
+ {
+ pthread_mutex_lock(&LOCK_prep_xids);
+ prepared_xids++;
+ pthread_mutex_unlock(&LOCK_prep_xids);
+ }
else
rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED);
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 8a39b1fc4eb..d51a0ef4c9f 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6579,18 +6579,6 @@ int Delete_rows_log_event::do_before_row_operations(TABLE *table)
if (!m_memory)
return HA_ERR_OUT_OF_MEM;
- if (table->s->keys > 0)
- {
- /* We have a key: search the table using the index */
- if (!table->file->inited)
- error= table->file->ha_index_init(0, FALSE);
- }
- else
- {
- /* We doesn't have a key: search the table using rnd_next() */
- error= table->file->ha_rnd_init(1);
- }
-
return error;
}
@@ -6638,6 +6626,20 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
{
DBUG_ASSERT(table != NULL);
+ if (table->s->keys > 0)
+ {
+ /* We have a key: search the table using the index */
+ if (!table->file->inited)
+ if (int error= table->file->ha_index_init(0, FALSE))
+ return error;
+ }
+ else
+ {
+    /* We don't have a key: search the table using rnd_next() */
+ if (int error= table->file->ha_rnd_init(1))
+ return error;
+ }
+
int error= find_and_fetch_row(table, m_key);
if (error)
return error;
@@ -6649,6 +6651,11 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
*/
error= table->file->ha_delete_row(table->record[0]);
+ /*
+ Have to restart the scan to be able to fetch the next row.
+ */
+ table->file->ha_index_or_rnd_end();
+
return error;
}
diff --git a/sql/log_event.h b/sql/log_event.h
index d6ce354fbdf..b24686514e3 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -18,10 +18,6 @@
#ifndef _log_event_h
#define _log_event_h
-#ifdef __EMX__
-#undef write // remove pthread.h macro definition, conflict with write() class member
-#endif
-
#if defined(USE_PRAGMA_INTERFACE) && !defined(MYSQL_CLIENT)
#pragma interface /* gcc class implementation */
#endif
diff --git a/sql/my_lock.c b/sql/my_lock.c
index 7f47256703a..69884df22f8 100644
--- a/sql/my_lock.c
+++ b/sql/my_lock.c
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#if defined(__EMX__) || defined(__NETWARE__)
+#if defined(__NETWARE__)
#include "../mysys/my_lock.c"
#else
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index ac7957d41f1..5773f0476a9 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -36,10 +36,6 @@
#include "sql_bitmap.h"
#include "sql_array.h"
-#ifdef __EMX__
-#undef write /* remove pthread.h macro definition for EMX */
-#endif
-
/* TODO convert all these three maps to Bitmap classes */
typedef ulonglong table_map; /* Used for table bits in join */
#if MAX_INDEXES <= 64
@@ -206,7 +202,7 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#define IF_NETWARE(A,B) (B)
#endif
-#if defined(__WIN__) || defined(OS2)
+#if defined(__WIN__)
#define IF_WIN(A,B) (A)
#undef FLUSH_TIME
#define FLUSH_TIME 1800 /* Flush every half hour */
@@ -1300,6 +1296,7 @@ extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
extern ulong what_to_log,flush_time;
extern ulong query_buff_size, thread_stack;
+extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit;
extern ulong max_binlog_size, max_relay_log_size;
#ifdef HAVE_ROW_BASED_REPLICATION
@@ -1354,6 +1351,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
LOCK_global_system_variables, LOCK_user_conn,
+ LOCK_prepared_stmt_count,
LOCK_bytes_sent, LOCK_bytes_received;
#ifdef HAVE_OPENSSL
extern pthread_mutex_t LOCK_des_key_file;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 71067630535..73bd0d57eb3 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -95,9 +95,7 @@ extern "C" { // Because of SCO 3.2V4.2
#endif
#include <my_net.h>
-#if defined(OS2)
-# include <sys/un.h>
-#elif !defined(__WIN__)
+#if !defined(__WIN__)
# ifndef __NETWARE__
#include <sys/resource.h>
# endif /* __NETWARE__ */
@@ -502,6 +500,22 @@ ulong specialflag=0;
ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
uint max_user_connections= 0;
+/*
+ Limit of the total number of prepared statements in the server.
+ Is necessary to protect the server against out-of-memory attacks.
+*/
+ulong max_prepared_stmt_count;
+/*
+ Current total number of prepared statements in the server. This number
+ is exact, and therefore may not be equal to the difference between
+ `com_stmt_prepare' and `com_stmt_close' (global status variables), as
+ the latter ones account for all registered attempts to prepare
+ a statement (including unsuccessful ones). Prepared statements are
+ currently connection-local: if the same SQL query text is prepared in
+ two different connections, this counts as two distinct prepared
+ statements.
+*/
+ulong prepared_stmt_count=0;
ulong thread_id=1L,current_pid;
ulong slow_launch_threads = 0, sync_binlog_period;
ulong expire_logs_days = 0;
@@ -579,6 +593,14 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi;
+/*
+ The below lock protects access to two global server variables:
+ max_prepared_stmt_count and prepared_stmt_count. These variables
+ set the limit and hold the current total number of prepared statements
+ in the server, respectively. As PREPARE/DEALLOCATE rate in a loaded
+ server may be fairly high, we need a dedicated lock.
+*/
+pthread_mutex_t LOCK_prepared_stmt_count;
#ifdef HAVE_OPENSSL
pthread_mutex_t LOCK_des_key_file;
#endif
@@ -648,10 +670,6 @@ static SECURITY_DESCRIPTOR sdPipeDescriptor;
static HANDLE hPipe = INVALID_HANDLE_VALUE;
#endif
-#ifdef OS2
-pthread_cond_t eventShutdown;
-#endif
-
#ifndef EMBEDDED_LIBRARY
bool mysqld_embedded=0;
#else
@@ -751,7 +769,7 @@ static void close_connections(void)
(void) pthread_mutex_unlock(&LOCK_manager);
/* kill connection thread */
-#if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_PRINT("quit",("waiting for select thread: 0x%lx",select_thread));
(void) pthread_mutex_lock(&LOCK_thread_count);
@@ -980,8 +998,6 @@ void kill_mysql(void)
*/
}
#endif
-#elif defined(OS2)
- pthread_cond_signal(&eventShutdown); // post semaphore
#elif defined(HAVE_PTHREAD_KILL)
if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL))
{
@@ -1007,7 +1023,7 @@ void kill_mysql(void)
/* Force server down. kill all connections and threads and exit */
-#if defined(OS2) || defined(__NETWARE__)
+#if defined(__NETWARE__)
extern "C" void kill_server(int sig_ptr)
#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN
#elif !defined(__WIN__)
@@ -1044,7 +1060,7 @@ static void __cdecl kill_server(int sig_ptr)
}
#endif
-#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2))
+#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__))
my_thread_init(); // If this is a new thread
#endif
close_connections();
@@ -1082,7 +1098,7 @@ extern "C" sig_handler print_signal_warning(int sig)
#ifdef DONT_REMEMBER_SIGNAL
my_sigset(sig,print_signal_warning); /* int. thread system calls */
#endif
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
if (sig == SIGALRM)
alarm(2); /* reschedule alarm */
#endif
@@ -1296,6 +1312,7 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_global_system_variables);
(void) pthread_mutex_destroy(&LOCK_global_read_lock);
(void) pthread_mutex_destroy(&LOCK_uuid_generator);
+ (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count);
(void) pthread_cond_destroy(&COND_thread_count);
(void) pthread_cond_destroy(&COND_refresh);
(void) pthread_cond_destroy(&COND_thread_cache);
@@ -1336,7 +1353,7 @@ static void set_ports()
static struct passwd *check_user(const char *user)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
struct passwd *user_info;
uid_t user_id= geteuid();
@@ -1390,7 +1407,7 @@ err:
static void set_user(const char *user, struct passwd *user_info)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_ASSERT(user_info != 0);
#ifdef HAVE_INITGROUPS
/*
@@ -1419,7 +1436,7 @@ static void set_user(const char *user, struct passwd *user_info)
static void set_effective_user(struct passwd *user_info)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_ASSERT(user_info != 0);
if (setregid((gid_t)-1, user_info->pw_gid) == -1)
{
@@ -1439,7 +1456,7 @@ static void set_effective_user(struct passwd *user_info)
static void set_root(const char *path)
{
-#if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
if (chroot(path) == -1)
{
sql_perror("chroot");
@@ -1786,7 +1803,7 @@ extern "C" sig_handler abort_thread(int sig __attribute__((unused)))
the signal thread is ready before continuing
******************************************************************************/
-#if defined(__WIN__) || defined(OS2)
+#if defined(__WIN__)
static void init_signals(void)
{
int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ;
@@ -2046,44 +2063,7 @@ static void check_data_home(const char *path)
{
}
-#elif defined(__EMX__)
-static void sig_reload(int signo)
-{
- // Flush everything
- bool not_used;
- reload_acl_and_cache((THD*) 0,REFRESH_LOG, (TABLE_LIST*) 0, &not_used);
- signal(signo, SIG_ACK);
-}
-
-static void sig_kill(int signo)
-{
- if (!kill_in_progress)
- {
- abort_loop=1; // mark abort for threads
- kill_server((void*) signo);
- }
- signal(signo, SIG_ACK);
-}
-
-static void init_signals(void)
-{
- signal(SIGQUIT, sig_kill);
- signal(SIGKILL, sig_kill);
- signal(SIGTERM, sig_kill);
- signal(SIGINT, sig_kill);
- signal(SIGHUP, sig_reload); // Flush everything
- signal(SIGALRM, SIG_IGN);
- signal(SIGBREAK,SIG_IGN);
- signal_thread = pthread_self();
-}
-
-static void start_signal_handler(void)
-{}
-
-static void check_data_home(const char *path)
-{}
-
-#else /* if ! __WIN__ && ! __EMX__ */
+#else /* if ! __WIN__ */
#ifdef HAVE_LINUXTHREADS
#define UNSAFE_DEFAULT_LINUX_THREADS 200
@@ -2556,33 +2536,6 @@ int STDCALL handle_kill(ulong ctrl_type)
}
#endif
-
-#ifdef OS2
-pthread_handler_t handle_shutdown(void *arg)
-{
- my_thread_init();
-
- // wait semaphore
- pthread_cond_wait(&eventShutdown, NULL);
-
- // close semaphore and kill server
- pthread_cond_destroy(&eventShutdown);
-
- /*
- Exit main loop on main thread, so kill will be done from
- main thread (this is thread 2)
- */
- abort_loop = 1;
-
- // unblock select()
- so_cancel(ip_sock);
- so_cancel(unix_sock);
-
- return 0;
-}
-#endif
-
-
static const char *load_default_groups[]= {
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
"mysql_cluster",
@@ -2650,14 +2603,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
return 1;
mysql_init_variables();
-#ifdef OS2
- {
- // fix timezone for daylight saving
- struct tm *ts = localtime(&start_time);
- if (ts->tm_isdst > 0)
- _timezone -= 3600;
- }
-#endif
#ifdef HAVE_TZNAME
{
struct tm tm_tmp;
@@ -2890,6 +2835,7 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
#ifdef HAVE_OPENSSL
(void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
@@ -3360,12 +3306,6 @@ static void create_shutdown_thread()
// On "Stop Service" we have to do regular shutdown
Service.SetShutdownEvent(hEventShutdown);
#endif
-#ifdef OS2
- pthread_cond_init(&eventShutdown, NULL);
- pthread_t hThread;
- if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0))
- sql_print_warning("Can't create thread to handle shutdown requests");
-#endif
#endif // EMBEDDED_LIBRARY
}
@@ -4292,10 +4232,6 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
create_new_thread(thd);
}
-#ifdef OS2
- // kill server must be invoked from thread 1!
- kill_server(MYSQL_KILL_SIGNAL);
-#endif
decrement_handler_count();
DBUG_RETURN(0);
}
@@ -4724,7 +4660,8 @@ enum options_mysqld
OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS,
OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE,
- OPT_MAX_JOIN_SIZE, OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
+ OPT_MAX_JOIN_SIZE, OPT_MAX_PREPARED_STMT_COUNT,
+ OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS,
OPT_MAX_LENGTH_FOR_SORT_DATA,
OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE,
@@ -5666,7 +5603,7 @@ log and this option does nothing anymore.",
0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files. Several paths may be specified, separated by a "
-#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
"semicolon (;)"
#else
"colon (:)"
@@ -5980,6 +5917,10 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.max_length_for_sort_data,
(gptr*) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
+ {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT,
+   "Maximum number of prepared statements in the server.",
+ (gptr*) &max_prepared_stmt_count, (gptr*) &max_prepared_stmt_count,
+ 0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0},
{"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE,
"If non-zero: relay log will be rotated automatically when the size exceeds this value; if zero (the default): when the size exceeds max_binlog_size. 0 excepted, the minimum value for this variable is 4096.",
(gptr*) &max_relay_log_size, (gptr*) &max_relay_log_size, 0, GET_ULONG,
@@ -6864,6 +6805,10 @@ SHOW_VAR status_vars[]= {
static void print_version(void)
{
set_server_version();
+ /*
+ Note: the instance manager keys off the string 'Ver' so it can find the
+ version from the output of 'mysqld --version', so don't change it!
+ */
printf("%s Ver %s for %s on %s (%s)\n",my_progname,
server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT);
}
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index c80bb8bad9a..9713e4bed44 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -135,7 +135,7 @@ my_bool my_net_init(NET *net, Vio* vio)
if (vio != 0) /* If real connection */
{
net->fd = vio_fd(vio); /* For perl DBI/DBD */
-#if defined(MYSQL_SERVER) && !defined(__WIN__) && !defined(__EMX__) && !defined(OS2)
+#if defined(MYSQL_SERVER) && !defined(__WIN__)
if (!(test_flags & TEST_BLOCKING))
{
my_bool old_mode;
@@ -604,7 +604,7 @@ net_real_write(NET *net,const char *packet,ulong len)
if ((long) (length=vio_write(net->vio,pos,(uint32) (end-pos))) <= 0)
{
my_bool interrupted = vio_should_retry(net->vio);
-#if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2))
+#if !defined(__WIN__)
if ((interrupted || length==0) && !thr_alarm_in_use(&alarmed))
{
if (!thr_alarm(&alarmed,(uint) net->write_timeout,&alarm_buff))
@@ -631,7 +631,7 @@ net_real_write(NET *net,const char *packet,ulong len)
}
}
else
-#endif /* (!defined(__WIN__) && !defined(__EMX__)) */
+#endif /* !defined(__WIN__) */
if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) &&
interrupted)
{
@@ -803,7 +803,7 @@ my_real_read(NET *net, ulong *complen)
DBUG_PRINT("info",("vio_read returned %d, errno: %d",
length, vio_errno(net->vio)));
-#if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2)) || defined(MYSQL_SERVER)
+#if !defined(__WIN__) || defined(MYSQL_SERVER)
/*
We got an error that there was no data on the socket. We now set up
an alarm to not 'read forever', change the socket to non blocking
@@ -839,7 +839,7 @@ my_real_read(NET *net, ulong *complen)
continue;
}
}
-#endif /* (!defined(__WIN__) && !defined(__EMX__)) || defined(MYSQL_SERVER) */
+#endif /* !defined(__WIN__) || defined(MYSQL_SERVER) */
if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) &&
interrupted)
{ /* Probably in MIT threads */
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 42f723eb382..3fddd780171 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -4370,7 +4370,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
sizeof(ROR_SCAN_INFO*)*
best_num)))
DBUG_RETURN(NULL);
- memcpy(trp->first_scan, ror_scan_mark, best_num*sizeof(ROR_SCAN_INFO*));
+ memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
trp->last_scan= trp->first_scan + best_num;
trp->is_covering= TRUE;
trp->read_cost= total_cost;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index c957efa71fb..a62b58a04ac 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -708,6 +708,12 @@ bool partition_info::check_partition_info(handlerton **eng_type,
partition_element *part_elem= part_it++;
if (!is_sub_partitioned())
{
+ if (check_table_name(part_elem->partition_name,
+ strlen(part_elem->partition_name)))
+ {
+ my_error(ER_WRONG_PARTITION_NAME, MYF(0));
+ goto end;
+ }
if (part_elem->engine_type == NULL)
part_elem->engine_type= default_engine_type;
DBUG_PRINT("info", ("engine = %d",
@@ -721,6 +727,12 @@ bool partition_info::check_partition_info(handlerton **eng_type,
do
{
part_elem= sub_it++;
+ if (check_table_name(part_elem->partition_name,
+ strlen(part_elem->partition_name)))
+ {
+ my_error(ER_WRONG_PARTITION_NAME, MYF(0));
+ goto end;
+ }
if (part_elem->engine_type == NULL)
part_elem->engine_type= default_engine_type;
DBUG_PRINT("info", ("engine = %u",
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 34dcd80a236..e207a0bf633 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -83,7 +83,7 @@ static int init_failsafe_rpl_thread(THD* thd)
DBUG_RETURN(-1);
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
diff --git a/sql/set_var.cc b/sql/set_var.cc
index f2694f651f4..59e0c7b6ff7 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -161,6 +161,7 @@ static KEY_CACHE *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
+static byte *get_prepared_stmt_count(THD *thd);
/*
Variable definition list
@@ -311,6 +312,10 @@ sys_var_thd_ha_rows sys_sql_max_join_size("sql_max_join_size",
&SV::max_join_size,
fix_max_join_size);
#endif
+static sys_var_long_ptr_global
+sys_max_prepared_stmt_count("max_prepared_stmt_count",
+ &max_prepared_stmt_count,
+ &LOCK_prepared_stmt_count);
sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size",
&max_relay_log_size,
fix_max_relay_log_size);
@@ -604,6 +609,9 @@ static sys_var_readonly sys_warning_count("warning_count",
OPT_SESSION,
SHOW_LONG,
get_warning_count);
+static sys_var_readonly sys_prepared_stmt_count("prepared_stmt_count",
+ OPT_GLOBAL, SHOW_LONG,
+ get_prepared_stmt_count);
/* alias for last_insert_id() to be compatible with Sybase */
#ifdef HAVE_REPLICATION
@@ -847,6 +855,8 @@ SHOW_VAR init_vars[]= {
{sys_max_join_size.name, (char*) &sys_max_join_size, SHOW_SYS},
{sys_max_length_for_sort_data.name, (char*) &sys_max_length_for_sort_data,
SHOW_SYS},
+ {sys_max_prepared_stmt_count.name, (char*) &sys_max_prepared_stmt_count,
+ SHOW_SYS},
{sys_max_relay_log_size.name, (char*) &sys_max_relay_log_size, SHOW_SYS},
{sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS},
{sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS},
@@ -900,6 +910,7 @@ SHOW_VAR init_vars[]= {
SHOW_SYS},
{"pid_file", (char*) pidfile_name, SHOW_CHAR},
{"plugin_dir", (char*) opt_plugin_dir, SHOW_CHAR},
+ {sys_prepared_stmt_count.name, (char*) &sys_prepared_stmt_count, SHOW_SYS},
{"port", (char*) &mysqld_port, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
{"protocol_version", (char*) &protocol_version, SHOW_INT},
@@ -1367,29 +1378,40 @@ static void fix_server_id(THD *thd, enum_var_type type)
server_id_supplied = 1;
}
-bool sys_var_long_ptr::check(THD *thd, set_var *var)
+
+sys_var_long_ptr::
+sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg)
+ :sys_var_long_ptr_global(name_arg, value_ptr,
+ &LOCK_global_system_variables, after_update_arg)
+{}
+
+
+bool sys_var_long_ptr_global::check(THD *thd, set_var *var)
{
longlong v= var->value->val_int();
var->save_result.ulonglong_value= v < 0 ? 0 : v;
return 0;
}
-bool sys_var_long_ptr::update(THD *thd, set_var *var)
+bool sys_var_long_ptr_global::update(THD *thd, set_var *var)
{
ulonglong tmp= var->save_result.ulonglong_value;
- pthread_mutex_lock(&LOCK_global_system_variables);
+ pthread_mutex_lock(guard);
if (option_limits)
*value= (ulong) getopt_ull_limit_value(tmp, option_limits);
else
*value= (ulong) tmp;
- pthread_mutex_unlock(&LOCK_global_system_variables);
+ pthread_mutex_unlock(guard);
return 0;
}
-void sys_var_long_ptr::set_default(THD *thd, enum_var_type type)
+void sys_var_long_ptr_global::set_default(THD *thd, enum_var_type type)
{
+ pthread_mutex_lock(guard);
*value= (ulong) option_limits->def_value;
+ pthread_mutex_unlock(guard);
}
@@ -2824,6 +2846,13 @@ static byte *get_error_count(THD *thd)
return (byte*) &thd->sys_var_tmp.long_value;
}
+static byte *get_prepared_stmt_count(THD *thd)
+{
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ thd->sys_var_tmp.ulong_value= prepared_stmt_count;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ return (byte*) &thd->sys_var_tmp.ulong_value;
+}
/****************************************************************************
Main handling of variables:
diff --git a/sql/set_var.h b/sql/set_var.h
index f62d6ce8d2a..8076f10bb0a 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -48,11 +48,7 @@ public:
sys_after_update_func after_update;
bool no_support_one_shot;
- sys_var(const char *name_arg)
- :name(name_arg), after_update(0)
- , no_support_one_shot(1)
- { add_sys_var(); }
- sys_var(const char *name_arg,sys_after_update_func func)
+ sys_var(const char *name_arg,sys_after_update_func func= NULL)
:name(name_arg), after_update(func)
, no_support_one_shot(1)
{ add_sys_var(); }
@@ -83,15 +79,35 @@ public:
};
-class sys_var_long_ptr :public sys_var
+/*
+ A base class for all variables that require its access to
+ be guarded with a mutex.
+*/
+
+class sys_var_global: public sys_var
+{
+protected:
+ pthread_mutex_t *guard;
+public:
+ sys_var_global(const char *name_arg, sys_after_update_func after_update_arg,
+ pthread_mutex_t *guard_arg)
+ :sys_var(name_arg, after_update_arg), guard(guard_arg) {}
+};
+
+
+/*
+ A global-only ulong variable that requires its access to be
+ protected with a mutex.
+*/
+
+class sys_var_long_ptr_global: public sys_var_global
{
public:
ulong *value;
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr)
- :sys_var(name_arg),value(value_ptr) {}
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
- sys_after_update_func func)
- :sys_var(name_arg,func), value(value_ptr) {}
+ sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr,
+ pthread_mutex_t *guard_arg,
+ sys_after_update_func after_update_arg= NULL)
+ :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {}
bool check(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
@@ -101,6 +117,18 @@ public:
};
+/*
+ A global ulong variable that is protected by LOCK_global_system_variables
+*/
+
+class sys_var_long_ptr :public sys_var_long_ptr_global
+{
+public:
+ sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg= NULL);
+};
+
+
class sys_var_ulonglong_ptr :public sys_var
{
public:
@@ -179,7 +207,7 @@ class sys_var_const_str :public sys_var
public:
char *value; // Pointer to const value
sys_var_const_str(const char *name_arg, const char *value_arg)
- :sys_var(name_arg), value((char*) value_arg)
+ :sys_var(name_arg),value((char*) value_arg)
{}
bool check(THD *thd, set_var *var)
{
@@ -226,10 +254,7 @@ public:
class sys_var_thd :public sys_var
{
public:
- sys_var_thd(const char *name_arg)
- :sys_var(name_arg)
- {}
- sys_var_thd(const char *name_arg, sys_after_update_func func)
+ sys_var_thd(const char *name_arg, sys_after_update_func func= NULL)
:sys_var(name_arg,func)
{}
bool check_type(enum_var_type type) { return 0; }
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index da2e0f4e275..8fe93cb99f3 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5829,3 +5829,11 @@ ER_PARTITION_NO_TEMPORARY
ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
eng "This partition function is not allowed"
swe "Denna partitioneringsfunktion är inte tillåten"
+ER_NULL_IN_VALUES_LESS_THAN
+ eng "Not allowed to use NULL value in VALUES LESS THAN"
+ swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
+ER_WRONG_PARTITION_NAME
+ eng "Incorrect partition name"
+ swe "Felaktigt partitionsnamn"
+ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
+ eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
diff --git a/sql/slave.cc b/sql/slave.cc
index 04ecc04ab17..4ab9e951813 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2601,7 +2601,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
DBUG_RETURN(-1);
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7499cec6147..e09fed180ba 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3000,13 +3000,13 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
if (table)
{
-#if defined( __WIN__) || defined(OS2)
+#if defined( __WIN__)
/* Win32 can't drop a file that is open */
if (lock_type == TL_WRITE_ALLOW_READ)
{
lock_type= TL_WRITE;
}
-#endif /* __WIN__ || OS2 */
+#endif /* __WIN__ */
table_list->lock_type= lock_type;
table_list->table= table;
table->grant= table_list->grant;
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 5b060aa13c6..6e8a559ee07 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -830,6 +830,11 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len));
+ /*
+ Make InnoDB release the adaptive hash index latch before
+ acquiring the query cache mutex.
+ */
+ ha_release_temporary_latches(thd);
STRUCT_LOCK(&structure_guard_mutex);
if (query_cache_size == 0)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 123152c95ec..63d3b053529 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -447,7 +447,7 @@ THD::~THD()
net_end(&net);
}
#endif
- stmt_map.destroy(); /* close all prepared statements */
+ stmt_map.reset(); /* close all prepared statements */
DBUG_ASSERT(lock_info.n_cursors == 0);
if (!cleanup_done)
cleanup();
@@ -1769,21 +1769,72 @@ Statement_map::Statement_map() :
}
-int Statement_map::insert(Statement *statement)
+/*
+ Insert a new statement to the thread-local statement map.
+
+ DESCRIPTION
+ If there was an old statement with the same name, replace it with the
+ new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
+ increase prepared_stmt_count, and insert the new statement. It's okay
+ to delete an old statement and fail to insert the new one.
+
+ POSTCONDITIONS
+ All named prepared statements are also present in names_hash.
+ Statement names in names_hash are unique.
+ The statement is added only if prepared_stmt_count < max_prepared_stmt_count
+ last_found_statement always points to a valid statement or is 0
+
+ RETURN VALUE
+ 0 success
+ 1 error: out of resources or max_prepared_stmt_count limit has been
+ reached. An error is sent to the client, the statement is deleted.
+*/
+
+int Statement_map::insert(THD *thd, Statement *statement)
{
- int res= my_hash_insert(&st_hash, (byte *) statement);
- if (res)
- return res;
- if (statement->name.str)
+ if (my_hash_insert(&st_hash, (byte*) statement))
{
- if ((res= my_hash_insert(&names_hash, (byte*)statement)))
- {
- hash_delete(&st_hash, (byte*)statement);
- return res;
- }
+ /*
+ Delete is needed only in case of an insert failure. In all other
+ cases hash_delete will also delete the statement.
+ */
+ delete statement;
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_st_hash;
+ }
+ if (statement->name.str && my_hash_insert(&names_hash, (byte*) statement))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_names_hash;
}
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ /*
+ We don't check that prepared_stmt_count is <= max_prepared_stmt_count
+ because we would like to allow to lower the total limit
+ of prepared statements below the current count. In that case
+ no new statements can be added until prepared_stmt_count drops below
+ the limit.
+ */
+ if (prepared_stmt_count >= max_prepared_stmt_count)
+ {
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
+ max_prepared_stmt_count);
+ goto err_max;
+ }
+ prepared_stmt_count++;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
last_found_statement= statement;
- return res;
+ return 0;
+
+err_max:
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte*) statement);
+err_names_hash:
+ hash_delete(&st_hash, (byte*) statement);
+err_st_hash:
+ return 1;
}
@@ -1797,6 +1848,47 @@ void Statement_map::close_transient_cursors()
}
+void Statement_map::erase(Statement *statement)
+{
+ if (statement == last_found_statement)
+ last_found_statement= 0;
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte *) statement);
+
+ hash_delete(&st_hash, (byte *) statement);
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count > 0);
+ prepared_stmt_count--;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+}
+
+
+void Statement_map::reset()
+{
+ /* Must be first, hash_free will reset st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ my_hash_reset(&names_hash);
+ my_hash_reset(&st_hash);
+ last_found_statement= 0;
+}
+
+
+Statement_map::~Statement_map()
+{
+ /* Must go first, hash_free will reset st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ hash_free(&names_hash);
+ hash_free(&st_hash);
+}
+
bool select_dumpvar::send_data(List<Item> &items)
{
List_iterator_fast<Item_func_set_user_var> li(vars);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 60ff558ac48..53712aaf69e 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -545,7 +545,7 @@ class Statement_map
public:
Statement_map();
- int insert(Statement *statement);
+ int insert(THD *thd, Statement *statement);
Statement *find_by_name(LEX_STRING *name)
{
@@ -567,36 +567,16 @@ public:
}
return last_found_statement;
}
- void erase(Statement *statement)
- {
- if (statement == last_found_statement)
- last_found_statement= 0;
- if (statement->name.str)
- {
- hash_delete(&names_hash, (byte *) statement);
- }
- hash_delete(&st_hash, (byte *) statement);
- }
/*
Close all cursors of this connection that use tables of a storage
engine that has transaction-specific state and therefore can not
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
*/
void close_transient_cursors();
+ void erase(Statement *statement);
/* Erase all statements (calls Statement destructor) */
- void reset()
- {
- my_hash_reset(&names_hash);
- my_hash_reset(&st_hash);
- transient_cursor_list.empty();
- last_found_statement= 0;
- }
-
- void destroy()
- {
- hash_free(&names_hash);
- hash_free(&st_hash);
- }
+ void reset();
+ ~Statement_map();
private:
HASH st_hash;
HASH names_hash;
@@ -1179,6 +1159,7 @@ public:
{
my_bool my_bool_value;
long long_value;
+ ulong ulong_value;
} sys_var_tmp;
struct {
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 33ad27b9d14..2784e71ccae 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -445,9 +445,8 @@ Sensitive_cursor::fetch(ulong num_rows)
if (error == NESTED_LOOP_CURSOR_LIMIT)
join->resume_nested_loop= TRUE;
-#ifdef USING_TRANSACTIONS
ha_release_temporary_latches(thd);
-#endif
+
/* Grab free_list here to correctly free it in close */
thd->restore_active_arena(this, &backup_arena);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 804270992e5..4365d5b04ce 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -870,7 +870,6 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
TABLE *table;
bool error;
uint closed_log_tables= 0, lock_logger= 0;
- TABLE_LIST *tmp_table_list;
uint path_length;
DBUG_ENTER("mysql_truncate");
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 9dfa20da522..18b63ba49a3 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -337,7 +337,6 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
ha_rows select_limit_cnt, ha_rows offset_limit_cnt)
{
TABLE_LIST *hash_tables;
- TABLE **table_ptr;
TABLE *table;
MYSQL_LOCK *lock;
List<Item> list;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 0aef574b3e4..946c0536897 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1713,7 +1713,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
since it does not find one in the list.
*/
pthread_mutex_lock(&di->mutex);
-#if !defined( __WIN__) && !defined(OS2) /* Win32 calls this in pthread_create */
+#if !defined( __WIN__) /* Win32 calls this in pthread_create */
if (my_thread_init())
{
strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES));
@@ -1729,7 +1729,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES));
goto err;
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index e1a519060aa..5a76066af8c 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -80,7 +80,7 @@ inline int lex_casecmp(const char *s, const char *t, uint len)
return (int) len+1;
}
-#include "lex_hash.h"
+#include <lex_hash.h>
void lex_init(void)
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 599480e616c..bf8a6b8cfbe 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -294,7 +294,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
{
(void) fn_format(name, ex->file_name, mysql_real_data_home, "",
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
-#if !defined(__WIN__) && !defined(OS2) && ! defined(__NETWARE__)
+#if !defined(__WIN__) && ! defined(__NETWARE__)
MY_STAT stat_info;
if (!my_stat(name,&stat_info,MYF(MY_WME)))
DBUG_RETURN(TRUE);
@@ -302,9 +302,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
// if we are not in slave thread, the file must be:
if (!thd->slave_thread &&
!((stat_info.st_mode & S_IROTH) == S_IROTH && // readable by others
-#ifndef __EMX__
(stat_info.st_mode & S_IFLNK) != S_IFLNK && // and not a symlink
-#endif
((stat_info.st_mode & S_IFREG) == S_IFREG ||
(stat_info.st_mode & S_IFIFO) == S_IFIFO)))
{
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 51d3cbbd41d..c63a5c23d60 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -118,10 +118,6 @@ static void test_signal(int sig_ptr)
#if !defined( DBUG_OFF)
MessageBox(NULL,"Test signal","DBUG",MB_OK);
#endif
-#if defined(OS2)
- fprintf(stderr, "Test signal %d\n", sig_ptr);
- fflush(stderr);
-#endif
}
static void init_signals(void)
{
@@ -1092,7 +1088,7 @@ pthread_handler_t handle_one_connection(void *arg)
pthread_detach_this_thread();
-#if !defined( __WIN__) && !defined(OS2) // Win32 calls this in pthread_create
+#if !defined( __WIN__) // Win32 calls this in pthread_create
/* The following calls needs to be done before we call DBUG_ macros */
if (!(test_flags & TEST_NO_THREADS) & my_thread_init())
{
@@ -1116,7 +1112,7 @@ pthread_handler_t handle_one_connection(void *arg)
#if defined(__WIN__)
init_signals();
-#elif !defined(OS2) && !defined(__NETWARE__)
+#elif !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -1240,7 +1236,7 @@ pthread_handler_t handle_bootstrap(void *arg)
#ifndef EMBEDDED_LIBRARY
pthread_detach_this_thread();
thd->thread_stack= (char*) &thd;
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -1969,9 +1965,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#ifdef __WIN__
sleep(1); // must wait after eof()
#endif
-#ifndef OS2
send_eof(thd); // This is for 'quit request'
-#endif
close_connection(thd, 0, 1);
close_thread_tables(thd); // Free before kill
kill_mysql();
@@ -2991,6 +2985,11 @@ end_with_restore_list:
#else
{
ulong priv=0;
+ ulong priv_needed= ALTER_ACL;
+ /* We also require DROP priv for ALTER TABLE ... DROP PARTITION */
+ if (lex->alter_info.flags & ALTER_DROP_PARTITION)
+ priv_needed|= DROP_ACL;
+
if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name);
@@ -3015,7 +3014,7 @@ end_with_restore_list:
else
select_lex->db= first_table->db;
}
- if (check_access(thd, ALTER_ACL, first_table->db,
+ if (check_access(thd, priv_needed, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)) ||
check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0,
@@ -3026,7 +3025,7 @@ end_with_restore_list:
goto error; /* purecov: inspected */
if (grant_option)
{
- if (check_grant(thd, ALTER_ACL, all_tables, 0, UINT_MAX, 0))
+ if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0))
goto error;
if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL))
{ // Rename of table
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 5cae38f2773..aae80f07b71 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1614,6 +1614,21 @@ static int add_key_partition(File fptr, List<char> field_list)
return err;
}
+static int add_name_string(File fptr, const char *name)
+{
+ int err;
+ String name_string("", 0, system_charset_info);
+ THD *thd= current_thd;
+ ulonglong save_options= thd->options;
+
+ thd->options= 0;
+ append_identifier(thd, &name_string, name,
+ strlen(name));
+ thd->options= save_options;
+ err= add_string_object(fptr, &name_string);
+ return err;
+}
+
static int add_int(File fptr, longlong number)
{
llstr(number, buff);
@@ -1912,7 +1927,7 @@ char *generate_partition_syntax(partition_info *part_info,
part_info->part_state_len= part_state_id+1;
}
err+= add_partition(fptr);
- err+= add_string(fptr, part_elem->partition_name);
+ err+= add_name_string(fptr, part_elem->partition_name);
err+= add_space(fptr);
err+= add_partition_values(fptr, part_info, part_elem);
if (!part_info->is_sub_partitioned())
@@ -1928,7 +1943,7 @@ char *generate_partition_syntax(partition_info *part_info,
{
part_elem= sub_it++;
err+= add_subpartition(fptr);
- err+= add_string(fptr, part_elem->partition_name);
+ err+= add_name_string(fptr, part_elem->partition_name);
err+= add_space(fptr);
err+= add_partition_options(fptr, part_elem);
if (j != (no_subparts-1))
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 3d42bfea104..b4e42dc4700 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -529,8 +529,16 @@ static int plugin_initialize(struct st_plugin_int *plugin)
switch (plugin->plugin->type)
{
case MYSQL_STORAGE_ENGINE_PLUGIN:
- sql_print_error("Storage Engine plugins are unsupported in this version.");
- goto err;
+ if (ha_initialize_handlerton((handlerton*) plugin->plugin->info))
+ {
+ sql_print_error("Plugin '%s' handlerton init returned error.",
+ plugin->name.str);
+ DBUG_PRINT("warning", ("Plugin '%s' handlerton init returned error.",
+ plugin->name.str));
+ goto err;
+ }
+ break;
+
default:
break;
}
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ced15b3f728..e0cf9095a22 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1848,10 +1848,13 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep)))
DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */
- if (thd->stmt_map.insert(stmt))
+ if (thd->stmt_map.insert(thd, stmt))
{
- delete stmt;
- DBUG_VOID_RETURN; /* out of memory */
+ /*
+ The error is set in the insert. The statement itself
+ will also be deleted there (this is how the hash works).
+ */
+ DBUG_VOID_RETURN;
}
/* Reset warnings from previous command */
@@ -2028,11 +2031,17 @@ void mysql_sql_stmt_prepare(THD *thd)
DBUG_VOID_RETURN; /* out of memory */
}
- if (stmt->set_name(name) || thd->stmt_map.insert(stmt))
+ /* Set the name first, insert should know that this statement has a name */
+ if (stmt->set_name(name))
{
delete stmt;
DBUG_VOID_RETURN;
}
+ if (thd->stmt_map.insert(thd, stmt))
+ {
+ /* The statement is deleted and an error is set if insert fails */
+ DBUG_VOID_RETURN;
+ }
if (stmt->prepare(query, query_len+1))
{
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index e900d33f743..6ec010b8a44 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -464,6 +464,12 @@ impossible position";
(rli->group_master_log_pos)
*/
int4store((char*) packet->ptr()+LOG_POS_OFFSET+1, 0);
+ /*
+ On reconnect the master sends the FD event with `created' set to 0
+ so that the slave does not destroy its temporary tables.
+ */
+ int4store((char*) packet->ptr()+LOG_EVENT_MINIMAL_HEADER_LEN+
+ ST_CREATED_OFFSET+1, (ulong) 0);
/* send it */
if (my_net_write(net, (char*)packet->ptr(), packet->length()))
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 19535f3182a..95433828a1e 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -753,6 +753,7 @@ mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd)
static const char *require_quotes(const char *name, uint name_length)
{
uint length;
+ bool pure_digit= TRUE;
const char *end= name + name_length;
for (; name < end ; name++)
@@ -761,7 +762,11 @@ static const char *require_quotes(const char *name, uint name_length)
length= my_mbcharlen(system_charset_info, chr);
if (length == 1 && !system_charset_info->ident_map[chr])
return name;
+ if (length == 1 && (chr < '0' || chr > '9'))
+ pure_digit= FALSE;
}
+ if (pure_digit)
+ return name;
return 0;
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 2687b64841f..6cc2ad266e5 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -72,12 +72,19 @@ uint filename_to_tablename(const char *from, char *to, uint to_length)
uint tablename_to_filename(const char *from, char *to, uint to_length)
{
- uint errors;
+ uint errors, length;
if (from[0] == '#' && !strncmp(from, MYSQL50_TABLE_NAME_PREFIX,
MYSQL50_TABLE_NAME_PREFIX_LENGTH))
return my_snprintf(to, to_length, "%s", from + 9);
- return strconvert(system_charset_info, from,
- &my_charset_filename, to, to_length, &errors);
+ length= strconvert(system_charset_info, from,
+ &my_charset_filename, to, to_length, &errors);
+ if (check_if_legal_tablename(to) &&
+ length + 4 < to_length)
+ {
+ memcpy(to + length, "@@@", 4);
+ length+= 3;
+ }
+ return length;
}
@@ -347,7 +354,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
lpt->create_info, lpt->new_create_list, lpt->key_count,
lpt->key_info_buffer, lpt->table->file)) ||
((flags & WFRM_CREATE_HANDLER_FILES) &&
- lpt->table->file->create_handler_files(path)))
+ lpt->table->file->create_handler_files(path, lpt->create_info)))
{
error= 1;
goto end;
@@ -3625,11 +3632,7 @@ err:
bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
{
-#ifdef OS2
- thr_lock_type lock_type = TL_WRITE;
-#else
thr_lock_type lock_type = TL_READ_NO_INSERT;
-#endif
DBUG_ENTER("mysql_analyze_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
@@ -3640,11 +3643,7 @@ bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
bool mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt)
{
-#ifdef OS2
- thr_lock_type lock_type = TL_WRITE;
-#else
thr_lock_type lock_type = TL_READ_NO_INSERT;
-#endif
DBUG_ENTER("mysql_check_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
@@ -3964,6 +3963,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN];
char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias;
char index_file[FN_REFLEN], data_file[FN_REFLEN];
+ char path[FN_REFLEN];
char reg_path[FN_REFLEN+1];
ha_rows copied,deleted;
ulonglong next_insert_id;
@@ -4000,6 +4000,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db))
new_db= db;
build_table_filename(reg_path, sizeof(reg_path), db, table_name, reg_ext);
+ build_table_filename(path, sizeof(path), db, table_name, "");
used_fields=create_info->used_fields;
@@ -4773,6 +4774,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
KEY_PART_INFO *part_end;
DBUG_PRINT("info", ("No new_table, checking add/drop index"));
+ table->file->prepare_for_alter();
if (index_add_count)
{
#ifdef XXX_TO_BE_DONE_LATER_BY_WL3020_AND_WL1892
@@ -4788,7 +4790,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
error= (mysql_create_frm(thd, reg_path, db, table_name,
create_info, prepared_create_list, key_count,
key_info_buffer, table->file) ||
- table->file->create_handler_files(reg_path));
+ table->file->create_handler_files(path, create_info));
VOID(pthread_mutex_unlock(&LOCK_open));
if (error)
goto err;
@@ -4834,7 +4836,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
error= (mysql_create_frm(thd, reg_path, db, table_name,
create_info, prepared_create_list, key_count,
key_info_buffer, table->file) ||
- table->file->create_handler_files(reg_path));
+ table->file->create_handler_files(path, create_info));
VOID(pthread_mutex_unlock(&LOCK_open));
if (error)
goto err;
@@ -4904,19 +4906,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
/*end of if (index_drop_count)*/
- if (index_add_count || index_drop_count)
- {
- /*
- The final .frm file is already created as a temporary file
- and will be renamed to the original table name later.
- */
+ /*
+ The final .frm file is already created as a temporary file
+ and will be renamed to the original table name later.
+ */
- /* Need to commit before a table is unlocked (NDB requirement). */
- DBUG_PRINT("info", ("Committing after add/drop index"));
- if (ha_commit_stmt(thd) || ha_commit(thd))
- goto err;
- committed= 1;
- }
+ /* Need to commit before a table is unlocked (NDB requirement). */
+ DBUG_PRINT("info", ("Committing before unlocking table"));
+ if (ha_commit_stmt(thd) || ha_commit(thd))
+ goto err;
+ committed= 1;
}
/*end of if (! new_table) for add/drop index*/
@@ -4998,7 +4997,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
}
-#if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2))
+#if !defined( __WIN__)
if (table->file->has_transactions())
#endif
{
@@ -5011,7 +5010,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
table=0; // Marker that table is closed
no_table_reopen= TRUE;
}
-#if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2))
+#if !defined( __WIN__)
else
table->file->extra(HA_EXTRA_FORCE_REOPEN); // Don't use this file anymore
#endif
@@ -5061,7 +5060,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
VOID(pthread_mutex_lock(&LOCK_open));
}
/* Tell the handler that a new frm file is in place. */
- if (table->file->create_handler_files(reg_path))
+ if (table->file->create_handler_files(path, create_info))
{
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index e48748bcfa5..2f91472ad2d 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3639,7 +3639,7 @@ part_definition:
;
part_name:
- ident_or_text
+ ident
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
@@ -3721,6 +3721,11 @@ part_func_max:
yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
YYABORT;
}
+ if (Lex->part_info->curr_part_elem->has_null_value)
+ {
+ yyerror(ER(ER_NULL_IN_VALUES_LESS_THAN));
+ YYABORT;
+ }
}
;
diff --git a/sql/udf_example.cc b/sql/udf_example.cc
index 73e49aef178..f4f936f34ef 100644
--- a/sql/udf_example.cc
+++ b/sql/udf_example.cc
@@ -497,7 +497,7 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result,
}
}
}
- *length= (ulong) (max(0, result - org_result - 1));
+ *length= (unsigned long) (result - org_result);
return org_result;
}
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7cb79e866f9..bb197181e2a 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -330,7 +330,7 @@ int rea_create_table(THD *thd, const char *path,
// Make sure mysql_create_frm din't remove extension
DBUG_ASSERT(*fn_rext(frm_name));
- if (file->create_handler_files(path))
+ if (file->create_handler_files(path, create_info))
goto err_handler;
if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
create_info,0))
diff --git a/sql/watchdog_mysqld b/sql/watchdog_mysqld
deleted file mode 100755
index 0b26bb15acd..00000000000
--- a/sql/watchdog_mysqld
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/perl
-# Copyright (C) 1979-1998 TcX AB & Monty Program KB & Detron HB
-#
-# This software is distributed with NO WARRANTY OF ANY KIND. No author or
-# distributor accepts any responsibility for the consequences of using it, or
-# for whether it serves any particular purpose or works at all, unless he or
-# she says so in writing. Refer to the Free Public License (the "License")
-# for full details.
-#
-# Every copy of this file must include a copy of the License, normally in a
-# plain ASCII text file named PUBLIC. The License grants you the right to
-# copy, modify and redistribute this file, but only under certain conditions
-# described in the License. Among other things, the License requires that
-# the copyright notice and this notice be preserved on all copies. */
-
-#
-# This scripts is started by safe_mysqld. It checks that MySQL is alive and
-# working ( = answering to ping). If not, force mysqld down, check all
-# tables and let safe_mysqld restart the server.
-#
-# For this to work, you should have procmail installed as the commands
-# 'lockfile' and is used to sync with safe_mysqld
-#
-# NOTE: You should only use this script as a last resort if mysqld locks
-# up unexpectedly in a critical application and you have to get it to
-# work temporarily while waiting for a solution from mysql@tcx.se or
-# mysql-support@tcx.se
-
-
-use POSIX "waitpid";
-
-# Arguments from safe_mysqld
-
-if ($#ARGV != 4)
-{
- print "$0: Wrong number of arguments. Aborting\n";
- exit 1;
-}
-
-$lock_file=shift; # File to lock to sync with safe_mysqld
-$pid_file=shift; # Pid file used by mysqld
-$bin_dir=shift; # Directory where mysqladmin is
-$test_timeout=shift; # Time between testing if mysqld is alive
-$wait_timeout=shift; # How long time to wait for ping
-
-$|=1; # autoflush
-
-# Check that mysqld has started properly
-
-for ($i=1 ; $i < 10 ; $i ++)
-{
- last if (-e $pid_file);
-}
-sleep(1); # If server has just created the file
-if (($mysqld_pid=`cat $pid_file`) <= 0)
-{
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
-}
-
-# Start pinging mysqld
-
-for (;;)
-{
- sleep($test_timeout); # Time between tests
- `lockfile $lock_file > /dev/null 2>&1`; # Sync with safe_mysqld
- if (($pid=fork()) == 0)
- {
- setpgrp(0,0);
- exit(int(system("$bin_dir/mysqladmin -w status > /dev/null")/256));
- }
- for ($i=0; ($res=waitpid(-1,&POSIX::WNOHANG)) == 0 && $i < $wait_timeout ; $i++)
- {
- sleep(1);
- }
- if ($res == 0)
- {
- print "$0: Warning: mysqld hanged; Killing it so that safe_mysqld can restart it!\n";
- $mysqld_pid= `cat $pid_file`;
- if ($mysqld_pid <= 0)
- {
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
- system("rm -f $lock_file");
- kill(-9,$pid);
- exit 1;
- }
- print "$0: Sending signal 15 to $mysqld_pid\n";
- kill(-15, $pid,$mysqld_pid); # Give it a last change to die nicely
- for ($i=0 ; $i < 5 ; $i++) { sleep(1); } # Wait 5 seconds (signal safe)
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$pid,$mysqld_pid) != 0)
- {
- print "$0: Sending signal 9 to $mysqld_pid\n";
- kill(-9,$pid,$mysqld_pid); # No time to be nice anymore
- sleep(2); # Give system time to clean up
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$mysqld_pid) != 0)
- {
- print "$0: Warning: mysqld don't want to die. Aborting\n";
- system("rm -f $lock_file");
- exit 1;
- }
- }
- # safe_mysqld will not restart mysqld if the pid file doesn't exists
- system("rm $pid_file");
- system("touch $pid_file");
- }
- elsif ($res == -1)
- {
- print "$0: Error: waitpid returned $res when wating for pid $pid\nPlease verify that $0 is correct for your system\n";
- system("rm -f $lock_file");
- exit 1;
- }
- else
- {
- $exit_code=int($?/256);
- if ($exit_code != 0)
- {
- print "$0: Warning: mysqladmin returned exit code $exit_code\n";
- }
- else
- {
- #print "mysqld is alive and feeling well\n";
- }
- }
- system("rm -f $lock_file"); # safemysqld will now take over
-}