author    unknown <jan@hundin.mysql.fi>  2004-12-21 07:49:38 +0200
committer unknown <jan@hundin.mysql.fi>  2004-12-21 07:49:38 +0200
commit    52d080f09979deb2063a8010feee3b925a3d7938 (patch)
tree      5d3767a8558fd05da17b5d4e3f361ef643d3a9b8 /sql
parent    868266f1b1ec0adde75db2887bbc73306441caeb (diff)
parent    272b380b459633454ad0ffbd877a986e06196216 (diff)
download  mariadb-git-52d080f09979deb2063a8010feee3b925a3d7938.tar.gz
Content merge.
sql/ha_innodb.cc: Auto merged
sql/ha_innodb.h: Auto merged
sql/handler.h: Auto merged
sql/mysql_priv.h: Auto merged
sql/sql_parse.cc: Auto merged
sql/sql_yacc.yy: Auto merged
innobase/trx/trx0roll.c: Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am             |    7
-rw-r--r--  sql/examples/ha_archive.cc  |   93
-rw-r--r--  sql/examples/ha_archive.h   |    4
-rw-r--r--  sql/field.cc                |  676
-rw-r--r--  sql/field.h                 |  117
-rw-r--r--  sql/field_conv.cc           |   42
-rw-r--r--  sql/ha_berkeley.cc          |   27
-rwxr-xr-x  sql/ha_federated.cc         | 1722
-rwxr-xr-x  sql/ha_federated.h          |  177
-rw-r--r--  sql/ha_heap.cc              |    6
-rw-r--r--  sql/ha_innodb.cc            |   27
-rw-r--r--  sql/ha_innodb.h             |    4
-rw-r--r--  sql/ha_myisam.cc            |   20
-rw-r--r--  sql/ha_myisam.h             |    2
-rw-r--r--  sql/handler.cc              |    9
-rw-r--r--  sql/handler.h               |    2
-rw-r--r--  sql/item.cc                 |   55
-rw-r--r--  sql/item.h                  |   13
-rw-r--r--  sql/item_sum.cc             |    6
-rw-r--r--  sql/key.cc                  |   84
-rw-r--r--  sql/mysql_priv.h            |    9
-rw-r--r--  sql/mysqld.cc               |   46
-rw-r--r--  sql/opt_range.cc            |  142
-rw-r--r--  sql/opt_range.h             |   18
-rw-r--r--  sql/protocol.cc             |    2
-rw-r--r--  sql/set_var.cc              |    7
-rw-r--r--  sql/share/Makefile.am       |    2
-rw-r--r--  sql/share/errmsg.txt        |  449
-rw-r--r--  sql/sp.cc                   |    1
-rw-r--r--  sql/sql_base.cc             |    2
-rw-r--r--  sql/sql_derived.cc          |   17
-rw-r--r--  sql/sql_lex.cc              |   35
-rw-r--r--  sql/sql_parse.cc            |   34
-rw-r--r--  sql/sql_prepare.cc          |    3
-rw-r--r--  sql/sql_select.cc           |  144
-rw-r--r--  sql/sql_select.h            |   12
-rw-r--r--  sql/sql_show.cc             |  101
-rw-r--r--  sql/sql_table.cc            |   19
-rw-r--r--  sql/sql_yacc.yy             |   45
-rw-r--r--  sql/structs.h               |    2
-rw-r--r--  sql/table.cc                |   31
-rw-r--r--  sql/table.h                 |    1
-rw-r--r--  sql/time.cc                 |  162
-rw-r--r--  sql/unireg.cc               |    2
-rw-r--r--  sql/unireg.h                |    2
45 files changed, 3388 insertions(+), 993 deletions(-)
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 501f9b03e0d..8ff55898ba4 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -62,7 +62,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
parse_file.h sql_view.h sql_trigger.h \
examples/ha_example.h examples/ha_archive.h \
- examples/ha_tina.h
+ examples/ha_tina.h \
+ ha_federated.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
@@ -98,7 +99,9 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
examples/ha_example.cc examples/ha_archive.cc \
- examples/ha_tina.cc
+ examples/ha_tina.cc \
+ ha_federated.cc
+
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 3f176da1c7f..f754793e319 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -22,6 +22,7 @@
#ifdef HAVE_ARCHIVE_DB
#include "ha_archive.h"
+#include <my_dir.h>
/*
First, if you want to understand storage engines you should look at
@@ -227,8 +228,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
/*
This method writes out the header of a meta file and returns whether or not it was successful.
By setting dirty you say whether or not the file represents the actual state of the data file.
- Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during
- a read that the file was dirty we will force a rebuild of this file.
+ Upon ::open() we set to dirty, and upon ::close() we set to clean.
*/
int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
{
@@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
share->use_count= 0;
share->table_name_length= length;
share->table_name= tmp_name;
+ share->crashed= FALSE;
fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
strmov(share->table_name,table_name);
@@ -315,24 +316,15 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
goto error;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- {
- /*
- The problem here is that for some reason, probably a crash, the meta
- file has been corrupted. So what do we do? Well we try to rebuild it
- ourself. Once that happens, we reread it, but if that fails we just
- call it quits and return an error.
- */
- if (rebuild_meta_file(share->table_name, share->meta_file))
- goto error;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- goto error;
- }
/*
After we read, we set the file to dirty. When we close, we will do the
- opposite.
+ opposite. If the meta file cannot be read we assume it is crashed and
+ leave it up to the user to fix.
*/
- (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+ if (read_meta_file(share->meta_file, &share->rows_recorded))
+ share->crashed= TRUE;
+ else
+ (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
@@ -408,7 +400,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_archive::open");
if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
thr_lock_data_init(&share->lock,&lock,NULL);
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
@@ -530,6 +522,9 @@ int ha_archive::write_row(byte * buf)
z_off_t written;
DBUG_ENTER("ha_archive::write_row");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
@@ -578,6 +573,9 @@ int ha_archive::rnd_init(bool scan)
{
DBUG_ENTER("ha_archive::rnd_init");
int read; // gzread() returns int, and we use this to check the header
+
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/* We rewind the file so that we can read from the beginning if scan */
if (scan)
@@ -672,6 +670,9 @@ int ha_archive::rnd_next(byte *buf)
int rc;
DBUG_ENTER("ha_archive::rnd_next");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
if (!scan_rows)
DBUG_RETURN(HA_ERR_END_OF_FILE);
scan_rows--;
@@ -722,22 +723,23 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
}
/*
- This method rebuilds the meta file. It does this by walking the datafile and
+ This method repairs the meta file. It does this by walking the datafile and
rewriting the meta file.
*/
-int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
+int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc;
byte *buf;
ulonglong rows_recorded= 0;
- gzFile rebuild_file; /* Archive file we are working with */
+ gzFile rebuild_file; // Archive file we are working with
+ File meta_file; // Meta file we use
char data_file_name[FN_REFLEN];
- DBUG_ENTER("ha_archive::rebuild_meta_file");
+ DBUG_ENTER("ha_archive::repair");
/*
Open up the meta file to recreate it.
*/
- fn_format(data_file_name, table_name, "", ARZ,
+ fn_format(data_file_name, share->table_name, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
DBUG_RETURN(errno ? errno : -1);
@@ -767,11 +769,18 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
*/
if (rc == HA_ERR_END_OF_FILE)
{
- (void)write_meta_file(meta_file, rows_recorded, FALSE);
+ fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
+ {
+ rc= HA_ERR_CRASHED_ON_USAGE;
+ goto error;
+ }
+ (void)write_meta_file(meta_file, rows_recorded, TRUE);
rc= 0;
}
my_free((gptr) buf, MYF(0));
+ share->crashed= FALSE;
error:
gzclose(rebuild_file);
@@ -790,13 +799,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
char block[IO_SIZE];
char writer_filename[FN_REFLEN];
+ /* Closing will cause all pending data to be flushed */
+ gzclose(share->archive_write);
+ share->archive_write= NULL;
+
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- /* Closing will cause all data waiting to be flushed, to be flushed */
- gzclose(share->archive_write);
-
if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
DBUG_RETURN(-1);
@@ -814,16 +824,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
my_rename(writer_filename,share->data_file_name,MYF(0));
- /*
- We reopen the file in case some IO is waiting to go through.
- In theory the table is closed right after this operation,
- but it is possible for IO to still happen.
- I may be being a bit too paranoid right here.
- */
- if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- DBUG_RETURN(errno ? errno : -1);
- share->dirty= FALSE;
-
DBUG_RETURN(0);
}
@@ -880,13 +880,27 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
void ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
-
/*
This should be an accurate number now, though bulk and delayed inserts can
cause the number to be inaccurate.
*/
records= share->rows_recorded;
deleted= 0;
+ /* Costs quite a bit more to get all information */
+ if (flag & HA_STATUS_TIME)
+ {
+ MY_STAT file_stat; // Stat information for the data file
+
+ VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
+
+ mean_rec_length= table->reclength + buffer.alloced_length();
+ data_file_length= file_stat.st_size;
+ create_time= file_stat.st_ctime;
+ update_time= file_stat.st_mtime;
+ max_data_file_length= share->rows_recorded * mean_rec_length;
+ }
+ delete_length= 0;
+ index_file_length=0;
DBUG_VOID_RETURN;
}
@@ -900,7 +914,7 @@ void ha_archive::info(uint flag)
*/
void ha_archive::start_bulk_insert(ha_rows rows)
{
- DBUG_ENTER("ha_archive::info");
+ DBUG_ENTER("ha_archive::start_bulk_insert");
bulk_insert= TRUE;
DBUG_VOID_RETURN;
}
@@ -912,6 +926,7 @@ void ha_archive::start_bulk_insert(ha_rows rows)
*/
int ha_archive::end_bulk_insert()
{
+ DBUG_ENTER("ha_archive::end_bulk_insert");
bulk_insert= FALSE;
share->dirty= TRUE;
DBUG_RETURN(0);
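
The dirty/crashed protocol above is simple: mark the meta file dirty on ::open(), clean on ::close(), and flag the share as crashed when the meta file cannot be read, so that auto_repair() can trigger a rebuild. A minimal standalone sketch of that protocol (struct and function names here are illustrative, not the patch's code):

    #include <cstdio>
    #include <cstdint>

    // Illustrative meta-file record: a dirty flag plus a row count.
    struct MetaRecord {
      uint8_t  dirty;   // 1 while the data file may be ahead of the meta file
      uint64_t rows;    // rows recorded at the last clean close
    };

    // Returns false when the meta file is unreadable; the caller would then
    // mark the share as crashed and wait for REPAIR TABLE.
    static bool read_meta(FILE *f, MetaRecord *out) {
      return fseek(f, 0, SEEK_SET) == 0 && fread(out, sizeof(*out), 1, f) == 1;
    }

    static bool write_meta(FILE *f, uint64_t rows, bool dirty) {
      MetaRecord rec = { static_cast<uint8_t>(dirty), rows };
      return fseek(f, 0, SEEK_SET) == 0 &&
             fwrite(&rec, sizeof(rec), 1, f) == 1 && fflush(f) == 0;
    }

Under this scheme a crash between the dirty write on open and the clean write on close leaves dirty == 1 on disk, signalling that the stored row count may be stale.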
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 809f52a883b..07bc7baa400 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -35,6 +35,7 @@ typedef struct st_archive_share {
File meta_file; /* Meta file we use */
gzFile archive_write; /* Archive file we are working with */
bool dirty; /* Flag for if a flush should occur */
+ bool crashed; /* Meta file is crashed */
ulonglong rows_recorded; /* Number of rows in tables */
} ARCHIVE_SHARE;
@@ -91,13 +92,14 @@ public:
int write_meta_file(File meta_file, ulonglong rows, bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
int free_share(ARCHIVE_SHARE *share);
- int rebuild_meta_file(char *table_name, File meta_file);
+ bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(gzFile file_to_read);
int write_data_header(gzFile file_to_write);
void position(const byte *record);
void info(uint);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
void start_bulk_insert(ha_rows rows);
int end_bulk_insert();
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
diff --git a/sql/field.cc b/sql/field.cc
index dafb3dc25da..ebeee476985 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -329,6 +329,27 @@ bool Field::field_cast_compatible(Field::field_cast_enum type)
}
+/*
+ Interpret field value as an integer but return the result as a string.
+
+ This is used for printing bit_fields as numbers while debugging
+*/
+
+String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag)
+{
+ CHARSET_INFO *cs= &my_charset_bin;
+ uint length= 21;
+ longlong value= val_int();
+ if (val_buffer->alloc(length))
+ return 0;
+ length= (uint) cs->cset->longlong10_to_str(cs, (char*) val_buffer->ptr(),
+ length, unsigned_flag ? 10 : -10,
+ value);
+ val_buffer->length(length);
+ return val_buffer;
+}
+
+
/****************************************************************************
** Functions for the base classes
** This is an unpacked number.
@@ -467,11 +488,11 @@ bool Field::get_time(TIME *ltime)
Needs to be changed if/when we want to support different time formats
*/
-void Field::store_time(TIME *ltime,timestamp_type type)
+int Field::store_time(TIME *ltime, timestamp_type type)
{
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= (uint) my_TIME_to_str(ltime, buff);
- store(buff, length, &my_charset_bin);
+ return store(buff, length, &my_charset_bin);
}
@@ -500,6 +521,22 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table)
return tmp;
}
+
+Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit)
+{
+ Field *tmp;
+ if ((tmp= new_field(root, new_table)))
+ {
+ tmp->ptr= new_ptr;
+ tmp->null_ptr= new_null_ptr;
+ tmp->null_bit= new_null_bit;
+ }
+ return tmp;
+}
+
+
/****************************************************************************
Field_null, a field that always return NULL
****************************************************************************/
@@ -3089,7 +3126,7 @@ int Field_timestamp::store(longlong nr)
bool in_dst_time_gap;
THD *thd= table->in_use;
- if (number_to_TIME(nr, &l_time, 0, &error))
+ if (number_to_datetime(nr, &l_time, 0, &error))
{
if (!(timestamp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap)))
{
@@ -3372,6 +3409,16 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs)
}
+int Field_time::store_time(TIME *ltime, timestamp_type type)
+{
+ long tmp= ((ltime->month ? 0 : ltime->day * 24L) + ltime->hour) * 10000L +
+ (ltime->minute * 100 + ltime->second);
+ if (ltime->neg)
+ tmp= -tmp;
+ return Field_time::store((longlong) tmp);
+}
+
+
int Field_time::store(double nr)
{
long tmp;
@@ -3953,17 +4000,20 @@ int Field_newdate::store(longlong nr)
return error;
}
-void Field_newdate::store_time(TIME *ltime,timestamp_type type)
+int Field_newdate::store_time(TIME *ltime,timestamp_type type)
{
long tmp;
+ int error= 0;
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
tmp=ltime->year*16*32+ltime->month*32+ltime->day;
else
{
tmp=0;
+ error= 1;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1);
}
int3store(ptr,tmp);
+ return error;
}
bool Field_newdate::send_binary(Protocol *protocol)
@@ -4112,7 +4162,7 @@ int Field_datetime::store(longlong nr)
int error;
longlong initial_nr= nr;
- nr= number_to_TIME(nr, &not_used, 1, &error);
+ nr= number_to_datetime(nr, &not_used, 1, &error);
if (error)
set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -4131,9 +4181,10 @@ int Field_datetime::store(longlong nr)
}
-void Field_datetime::store_time(TIME *ltime,timestamp_type type)
+int Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
longlong tmp;
+ int error= 0;
/*
We don't perform range checking here since values stored in TIME
structure always fit into DATETIME range.
@@ -4144,6 +4195,7 @@ void Field_datetime::store_time(TIME *ltime,timestamp_type type)
else
{
tmp=0;
+ error= 1;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1);
}
#ifdef WORDS_BIGENDIAN
@@ -4154,6 +4206,7 @@ void Field_datetime::store_time(TIME *ltime,timestamp_type type)
else
#endif
longlongstore(ptr,tmp);
+ return error;
}
bool Field_datetime::send_binary(Protocol *protocol)
@@ -4641,7 +4694,19 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
}
/****************************************************************************
-** VARCHAR type (Not available for the end user yet)
+ VARCHAR type
+ Data in field->ptr is stored as:
+ 1 or 2 bytes length-prefix-header (from Field_varstring::length_bytes)
+ data
+
+ NOTE:
+ When VARCHAR is stored in a key (for handler::index_read() etc) it's always
+ stored with a 2 byte prefix. (Just like blob keys).
+
+ Normally length_bytes is calculated as (field_length < 256 ? 1 : 2)
+ The exception is if there is a prefix key field that is part of a long
+ VARCHAR, in which case field_length for this may be 1 but the length_bytes
+ is 2.
****************************************************************************/
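
To make the 1-vs-2 byte prefix described above concrete, here is a small sketch (helper names are mine, not MySQL's) of writing and reading a VARCHAR value in a record buffer; the 2-byte form is little-endian, matching int2store()/uint2korr():

    #include <cstring>
    #include <cstdint>

    // length_bytes is 1 when field_length < 256, otherwise 2.
    static size_t varstr_store(uint8_t *rec, unsigned length_bytes,
                               const char *data, size_t len) {
      if (length_bytes == 1)
        rec[0] = static_cast<uint8_t>(len);
      else {
        rec[0] = static_cast<uint8_t>(len & 0xff);  // low byte first
        rec[1] = static_cast<uint8_t>(len >> 8);
      }
      memcpy(rec + length_bytes, data, len);
      return length_bytes + len;
    }

    static size_t varstr_length(const uint8_t *rec, unsigned length_bytes) {
      return length_bytes == 1 ? rec[0]
                               : rec[0] | (static_cast<size_t>(rec[1]) << 8);
    }

Keys, by contrast, always carry the 2-byte form (HA_KEY_BLOB_LENGTH), which is why the key_cmp() changes below read length_bytes on the record side but uint2korr() on the key side.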
@@ -4670,8 +4735,11 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
from,from+length,
field_length/
field_charset->mbmaxlen);
- memcpy(ptr + HA_KEY_BLOB_LENGTH, from, copy_length);
- int2store(ptr, copy_length);
+ memcpy(ptr + length_bytes, from, copy_length);
+ if (length_bytes == 1)
+ *ptr= (uchar) copy_length;
+ else
+ int2store(ptr, copy_length);
if (copy_length < length)
error= 1;
@@ -4684,91 +4752,117 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_varstring::store(longlong nr)
{
char buff[64];
- int l;
- CHARSET_INFO *cs=charset();
- l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr);
- return Field_varstring::store(buff,(uint)l,cs);
+ uint length;
+ length= (uint) (field_charset->cset->longlong10_to_str)(field_charset,
+ buff,
+ sizeof(buff),
+ -10,nr);
+ return Field_varstring::store(buff, length, field_charset);
}
double Field_varstring::val_real(void)
{
int not_used;
- uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH;
- CHARSET_INFO *cs=charset();
- return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, (char**)0, &not_used);
+ uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ return my_strntod(field_charset, ptr+length_bytes, length, (char**) 0,
+ &not_used);
}
longlong Field_varstring::val_int(void)
{
int not_used;
- uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH;
- CHARSET_INFO *cs=charset();
- return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, &not_used);
+ uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ return my_strntoll(field_charset, ptr+length_bytes, length, 10, NULL,
+ &not_used);
}
String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
- uint length=uint2korr(ptr);
- val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset);
+ uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ val_ptr->set((const char*) ptr+length_bytes, length, field_charset);
return val_ptr;
}
int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
{
- uint a_length=uint2korr(a_ptr);
- uint b_length=uint2korr(b_ptr);
+ uint a_length, b_length;
int diff;
+
+ if (length_bytes == 1)
+ {
+ a_length= (uint) (uchar) *a_ptr;
+ b_length= (uint) (uchar) *b_ptr;
+ }
+ else
+ {
+ a_length= uint2korr(a_ptr);
+ b_length= uint2korr(b_ptr);
+ }
diff= field_charset->coll->strnncollsp(field_charset,
(const uchar*) a_ptr+
- HA_KEY_BLOB_LENGTH,
+ length_bytes,
a_length,
(const uchar*) b_ptr+
- HA_KEY_BLOB_LENGTH,
+ length_bytes,
b_length,0);
return diff;
}
+/*
+ NOTE: varstring and blob keys are ALWAYS stored with a 2 byte length prefix
+*/
+
int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length)
{
char *blob1;
- uint length= uint2korr(ptr);
- CHARSET_INFO *cs= charset();
- uint char_length= max_key_length / cs->mbmaxlen;
+ uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ uint char_length= max_key_length / field_charset->mbmaxlen;
- char_length= my_charpos(cs, ptr + HA_KEY_BLOB_LENGTH,
- ptr + HA_KEY_BLOB_LENGTH + length, char_length);
+ char_length= my_charpos(field_charset, ptr + length_bytes,
+ ptr + length_bytes + length, char_length);
set_if_smaller(length, char_length);
- return cs->coll->strnncollsp(cs,
- (const uchar*) ptr+2, length,
- (const uchar*) key_ptr+HA_KEY_BLOB_LENGTH,
- uint2korr(key_ptr), 0);
+ return field_charset->coll->strnncollsp(field_charset,
+ (const uchar*) ptr + length_bytes,
+ length,
+ (const uchar*) key_ptr+
+ HA_KEY_BLOB_LENGTH,
+ uint2korr(key_ptr), 0);
}
+/*
+ Compare to key segments (always 2 byte length prefix)
+
+ NOTE
+ This is used only to compare key segments created for index_read().
+ (keys are created and compared in key.cc)
+*/
+
int Field_varstring::key_cmp(const byte *a,const byte *b)
{
- CHARSET_INFO *cs= charset();
- return cs->coll->strnncollsp(cs,
- (const uchar*) a + HA_KEY_BLOB_LENGTH,
- uint2korr(a),
- (const uchar*) b + HA_KEY_BLOB_LENGTH,
- uint2korr(b),
- 0);
+ return field_charset->coll->strnncollsp(field_charset,
+ (const uchar*) a +
+ HA_KEY_BLOB_LENGTH,
+ uint2korr(a),
+ (const uchar*) b +
+ HA_KEY_BLOB_LENGTH,
+ uint2korr(b),
+ 0);
}
void Field_varstring::sort_string(char *to,uint length)
{
- uint tot_length= uint2korr(ptr);
+ uint tot_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
tot_length= my_strnxfrm(field_charset,
(uchar*) to, length,
- (uchar*) ptr+HA_KEY_BLOB_LENGTH,
+ (uchar*) ptr + length_bytes,
tot_length);
if (tot_length < length)
field_charset->cset->fill(field_charset, to+tot_length,length-tot_length,
@@ -4776,6 +4870,18 @@ void Field_varstring::sort_string(char *to,uint length)
}
+enum ha_base_keytype Field_varstring::key_type() const
+{
+ enum ha_base_keytype res;
+
+ if (binary())
+ res= length_bytes == 1 ? HA_KEYTYPE_VARBINARY1 : HA_KEYTYPE_VARBINARY2;
+ else
+ res= length_bytes == 1 ? HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2;
+ return res;
+}
+
+
void Field_varstring::sql_type(String &res) const
{
THD *thd= table->in_use;
@@ -4793,9 +4899,14 @@ void Field_varstring::sql_type(String &res) const
}
+/*
+ Functions to create a packed row.
+ Here the number of length bytes are depending on the given max_length
+*/
+
char *Field_varstring::pack(char *to, const char *from, uint max_length)
{
- uint length=uint2korr(from);
+ uint length= length_bytes == 1 ? (uint) (uchar) *from : uint2korr(from);
set_if_smaller(max_length, field_length);
if (length > max_length)
length=max_length;
@@ -4803,39 +4914,104 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length)
if (max_length > 255)
*to++= (char) (length >> 8);
if (length)
- memcpy(to, from+HA_KEY_BLOB_LENGTH, length);
+ memcpy(to, from+length_bytes, length);
return to+length;
}
-char *Field_varstring::pack_key(char *to, const char *from, uint max_length)
+char *Field_varstring::pack_key(char *to, const char *key, uint max_length)
{
- uint length=uint2korr(from);
+ uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key);
uint char_length= ((field_charset->mbmaxlen > 1) ?
max_length/field_charset->mbmaxlen : max_length);
- from+= HA_KEY_BLOB_LENGTH;
+ key+= length_bytes;
if (length > char_length)
{
- char_length= my_charpos(field_charset, from, from+length, char_length);
+ char_length= my_charpos(field_charset, key, key+length, char_length);
set_if_smaller(length, char_length);
}
*to++= (char) (length & 255);
if (max_length > 255)
*to++= (char) (length >> 8);
if (length)
- memcpy(to, from, length);
+ memcpy(to, key, length);
+ return to+length;
+}
+
+
+/*
+ Unpack a key into a record buffer.
+
+ SYNOPSIS
+ unpack_key()
+ to Pointer into the record buffer.
+ key Pointer to the packed key.
+ max_length Key length limit from key description.
+
+ DESCRIPTION
+ A VARCHAR key has a maximum size of 64K-1.
+ In its packed form, the length field is one or two bytes long,
+ depending on 'max_length'.
+
+ RETURN
+ Pointer to end of 'key' (To the next key part if multi-segment key)
+*/
+
+const char *Field_varstring::unpack_key(char *to, const char *key,
+ uint max_length)
+{
+ /* get length of the blob key */
+ uint32 length= *((uchar*) key++);
+ if (max_length > 255)
+ length+= (*((uchar*) key++)) << 8;
+
+ /* put the length into the record buffer */
+ if (length_bytes == 1)
+ *ptr= (uchar) length;
+ else
+ int2store(ptr, length);
+ memcpy(ptr + length_bytes, key, length);
+ return key + length;
+}
+
+/*
+ Create a packed key that will be used for storage in the index tree
+
+ SYNOPSIS
+ pack_key_from_key_image()
+ to Store packed key segment here
+ from Key segment (as given to index_read())
+ max_length Max length of key
+
+ RETURN
+ end of key storage
+*/
+
+char *Field_varstring::pack_key_from_key_image(char *to, const char *from,
+ uint max_length)
+{
+ /* Key length is always stored as 2 bytes */
+ uint length= uint2korr(from);
+ if (length > max_length)
+ length= max_length;
+ *to++= (char) (length & 255);
+ if (max_length > 255)
+ *to++= (char) (length >> 8);
+ if (length)
+ memcpy(to, from+HA_KEY_BLOB_LENGTH, length);
return to+length;
}
+/*
+ unpack field packed with Field_varstring::pack()
+*/
+
const char *Field_varstring::unpack(char *to, const char *from)
{
uint length;
- if (field_length <= 255)
- {
+ if (length_bytes == 1)
length= (uint) (uchar) (*to= *from++);
- to[1]=0;
- }
else
{
length= uint2korr(from);
@@ -4843,7 +5019,7 @@ const char *Field_varstring::unpack(char *to, const char *from)
to[1]= *from++;
}
if (length)
- memcpy(to+HA_KEY_BLOB_LENGTH, from, length);
+ memcpy(to+ length_bytes, from, length);
return from+length;
}
@@ -4851,12 +5027,11 @@ const char *Field_varstring::unpack(char *to, const char *from)
int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length,
my_bool insert_or_update)
{
- uint a_length;
- uint b_length;
+ uint a_length, b_length;
if (key_length > 255)
{
- a_length=uint2korr(a); a+= HA_KEY_BLOB_LENGTH;
- b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH;
+ a_length=uint2korr(a); a+= 2;
+ b_length=uint2korr(b); b+= 2;
}
else
{
@@ -4873,8 +5048,8 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length,
int Field_varstring::pack_cmp(const char *b, uint key_length,
my_bool insert_or_update)
{
- char *a= ptr+HA_KEY_BLOB_LENGTH;
- uint a_length= uint2korr(ptr);
+ char *a= ptr+ length_bytes;
+ uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
uint b_length;
uint char_length= ((field_charset->mbmaxlen > 1) ?
key_length / field_charset->mbmaxlen : key_length);
@@ -4903,7 +5078,7 @@ int Field_varstring::pack_cmp(const char *b, uint key_length,
uint Field_varstring::packed_col_length(const char *data_ptr, uint length)
{
if (length > 255)
- return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH;
+ return uint2korr(data_ptr)+2;
return (uint) ((uchar) *data_ptr)+1;
}
@@ -4916,13 +5091,14 @@ uint Field_varstring::max_packed_col_length(uint max_length)
void Field_varstring::get_key_image(char *buff, uint length, imagetype type)
{
- uint f_length= uint2korr(ptr);
+ uint f_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
uint char_length= length / field_charset->mbmaxlen;
- char_length= my_charpos(field_charset, ptr, ptr + HA_KEY_BLOB_LENGTH,
+ char_length= my_charpos(field_charset, ptr, ptr + length_bytes,
char_length);
set_if_smaller(f_length, char_length);
+ /* Key is always stored with 2 bytes */
int2store(buff,f_length);
- memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, f_length);
+ memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+length_bytes, f_length);
if (f_length < length)
{
/*
@@ -4936,18 +5112,12 @@ void Field_varstring::get_key_image(char *buff, uint length, imagetype type)
void Field_varstring::set_key_image(char *buff,uint length)
{
- length=uint2korr(buff); // Real length is here
+ length= uint2korr(buff); // Real length is here
(void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length,
field_charset);
}
-int Field_varstring::cmp_binary_offset(uint row_offset)
-{
- return cmp_binary(ptr, ptr+row_offset);
-}
-
-
int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr,
uint32 max_length)
{
@@ -4955,13 +5125,49 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr,
uint diff;
uint32 a_length,b_length;
- a_length= uint2korr(a_ptr);
- b_length= uint2korr(b_ptr);
+ if (length_bytes == 1)
+ {
+ a_length= (uint) (uchar) *a_ptr;
+ b_length= (uint) (uchar) *b_ptr;
+ }
+ else
+ {
+ a_length= uint2korr(a_ptr);
+ b_length= uint2korr(b_ptr);
+ }
set_if_smaller(a_length, max_length);
set_if_smaller(b_length, max_length);
if (a_length != b_length)
return 1;
- return memcmp(a_ptr+2, b_ptr+2, a_length);
+ return memcmp(a_ptr+length_bytes, b_ptr+length_bytes, a_length);
+}
+
+
+Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table)
+{
+ Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table);
+ if (res)
+ res->length_bytes= length_bytes;
+ return res;
+}
+
+
+Field *Field_varstring::new_key_field(MEM_ROOT *root,
+ struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit)
+{
+ Field_varstring *res;
+ if ((res= (Field_varstring*) Field::new_key_field(root,
+ new_table,
+ new_ptr,
+ new_null_ptr,
+ new_null_bit)))
+ {
+ /* Keys length prefixes are always packed with 2 bytes */
+ res->length_bytes= 2;
+ }
+ return res;
}
@@ -5218,18 +5424,6 @@ int Field_blob::cmp(const char *a_ptr, const char *b_ptr)
}
-int Field_blob::cmp_offset(uint row_offset)
-{
- return Field_blob::cmp(ptr,ptr+row_offset);
-}
-
-
-int Field_blob::cmp_binary_offset(uint row_offset)
-{
- return cmp_binary(ptr, ptr+row_offset);
-}
-
-
int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr,
uint32 max_length)
{
@@ -5416,8 +5610,7 @@ const char *Field_blob::unpack(char *to, const char *from)
int Field_blob::pack_cmp(const char *a, const char *b, uint key_length,
my_bool insert_or_update)
{
- uint a_length;
- uint b_length;
+ uint a_length, b_length;
if (key_length > 255)
{
a_length=uint2korr(a); a+=2;
@@ -5523,6 +5716,7 @@ const char *Field_blob::unpack_key(char *to, const char *from, uint max_length)
return from + length;
}
+
/* Create a packed key that will be used for storage from a MySQL key */
char *Field_blob::pack_key_from_key_image(char *to, const char *from,
@@ -6032,6 +6226,264 @@ bool Field_num::eq_def(Field *field)
}
+/*
+ Bit field.
+
+ We store the first 0 - 6 uneven bits among the null bits
+ at the start of the record. The remaining bytes are stored in
+ the record itself.
+
+ For example:
+
+ CREATE TABLE t1 (a int, b bit(17), c bit(21) not null, d bit(8));
+ We would store data as follows in the record:
+
+ Byte Bit
+ 1 7 - reserve for delete
+ 6 - null bit for 'a'
+ 5 - null bit for 'b'
+ 4 - first (high) bit of 'b'
+ 3 - first (high) bit of 'c'
+ 2 - second bit of 'c'
+ 1 - third bit of 'c'
+ 0 - fourth bit of 'c'
+ 2 7 - fifth bit of 'c'
+ 6 - null bit for 'd'
+ 3 - 6 four bytes for 'a'
+ 7 - 8 two bytes for 'b'
+ 9 - 10 two bytes for 'c'
+ 11 one byte for 'd'
+*/
+
+Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ struct st_table *table_arg)
+ : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, table_arg),
+ bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7)
+{
+ /*
+ Ensure that Field::eq() can distinguish between two different bit fields.
+ (two bit fields that are not null, may have same ptr and null_ptr)
+ */
+ if (!null_ptr_arg)
+ null_bit= bit_ofs_arg;
+}
+
+
+Field *Field_bit::new_key_field(MEM_ROOT *root,
+ struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit)
+{
+ Field_bit *res;
+ if ((res= (Field_bit*) Field::new_key_field(root, new_table,
+ new_ptr, new_null_ptr,
+ new_null_bit)))
+ {
+ /* Move bits normally stored in null_pointer to new_ptr */
+ res->bit_ptr= (uchar*) new_ptr;
+ res->bit_ofs= 0;
+ if (bit_len)
+ res->ptr++; // Store rest of data here
+ }
+ return res;
+}
+
+
+void Field_bit::make_field(Send_field *field)
+{
+ /* table_cache_key is not set for temp tables */
+ field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key :
+ "");
+ field->org_table_name= orig_table->real_name;
+ field->table_name= orig_table->table_name;
+ field->col_name= field->org_col_name= field_name;
+ field->charsetnr= charset()->number;
+ field->length= field_length;
+ field->type= type();
+ field->flags= table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags;
+ field->decimals= 0;
+}
+
+
+int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
+{
+ int delta;
+
+ for (; !*from && length; from++, length--); // skip left 0's
+ delta= field_length - length;
+
+ if (delta < -1 ||
+ (delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) ||
+ (!bit_len && delta < 0))
+ {
+ set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len);
+ memset(ptr, 0xff, field_length);
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ return 1;
+ }
+ /* delta is >= -1 here */
+ if (delta > 0)
+ {
+ if (bit_len)
+ clr_rec_bits(bit_ptr, bit_ofs, bit_len);
+ bzero(ptr, delta);
+ memcpy(ptr + delta, from, length);
+ }
+ else if (delta == 0)
+ {
+ if (bit_len)
+ clr_rec_bits(bit_ptr, bit_ofs, bit_len);
+ memcpy(ptr, from, length);
+ }
+ else
+ {
+ if (bit_len)
+ {
+ set_rec_bits((uchar) *from, bit_ptr, bit_ofs, bit_len);
+ from++;
+ }
+ memcpy(ptr, from, field_length);
+ }
+ return 0;
+}
+
+
+int Field_bit::store(double nr)
+{
+ return (Field_bit::store((longlong) nr));
+}
+
+
+int Field_bit::store(longlong nr)
+{
+ char buf[8];
+
+ mi_int8store(buf, nr);
+ return store(buf, 8, NULL);
+}
+
+
+double Field_bit::val_real(void)
+{
+ return (double) Field_bit::val_int();
+}
+
+
+longlong Field_bit::val_int(void)
+{
+ ulonglong bits= 0;
+ if (bit_len)
+ bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
+ bits<<= (field_length * 8);
+
+ switch (field_length) {
+ case 0: return bits;
+ case 1: return bits | (ulonglong) (uchar) ptr[0];
+ case 2: return bits | mi_uint2korr(ptr);
+ case 3: return bits | mi_uint3korr(ptr);
+ case 4: return bits | mi_uint4korr(ptr);
+ case 5: return bits | mi_uint5korr(ptr);
+ case 6: return bits | mi_uint6korr(ptr);
+ case 7: return bits | mi_uint7korr(ptr);
+ default: return mi_uint8korr(ptr + field_length - sizeof(longlong));
+ }
+}
+
+
+String *Field_bit::val_str(String *val_buffer,
+ String *val_ptr __attribute__((unused)))
+{
+ uint length= min(pack_length(), sizeof(longlong));
+ ulonglong bits= val_int();
+
+ val_buffer->alloc(length);
+ memcpy_fixed((char*) val_buffer->ptr(), (char*) &bits, length);
+ val_buffer->length(length);
+ val_buffer->set_charset(&my_charset_bin);
+ return val_buffer;
+}
+
+
+int Field_bit::key_cmp(const byte *str, uint length)
+{
+ if (bit_len)
+ {
+ int flag;
+ uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
+ if ((flag= (int) (bits - *(uchar*) str)))
+ return flag;
+ str++;
+ length--;
+ }
+ return bcmp(ptr, str, length);
+}
+
+
+int Field_bit::cmp_offset(uint row_offset)
+{
+ if (bit_len)
+ {
+ int flag;
+ uchar bits_a= get_rec_bits(bit_ptr, bit_ofs, bit_len);
+ uchar bits_b= get_rec_bits(bit_ptr + row_offset, bit_ofs, bit_len);
+ if ((flag= (int) (bits_a - bits_b)))
+ return flag;
+ }
+ return bcmp(ptr, ptr + row_offset, field_length);
+}
+
+
+void Field_bit::get_key_image(char *buff, uint length, imagetype type)
+{
+ if (bit_len)
+ {
+ uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
+ *buff++= bits;
+ length--;
+ }
+ memcpy(buff, ptr, min(length, field_length));
+}
+
+
+void Field_bit::sql_type(String &res) const
+{
+ CHARSET_INFO *cs= res.charset();
+ ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
+ "bit(%d)",
+ (int) field_length * 8 + bit_len);
+ res.length((uint) length);
+}
+
+
+char *Field_bit::pack(char *to, const char *from, uint max_length)
+{
+ uint length= min(field_length + (bit_len > 0), max_length);
+ if (bit_len)
+ {
+ uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len);
+ *to++= bits;
+ length--;
+ }
+ memcpy(to, from, length);
+ return to + length;
+}
+
+
+const char *Field_bit::unpack(char *to, const char *from)
+{
+ if (bit_len)
+ {
+ set_rec_bits(*from, bit_ptr, bit_ofs, bit_len);
+ from++;
+ }
+ memcpy(to, from, field_length);
+ return from + field_length;
+}
+
+
/*****************************************************************************
Handling of field and create_field
*****************************************************************************/
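
A worked example of the Field_bit layout documented above: the constructor splits a BIT(len) column into len >> 3 whole bytes stored in the record proper and len & 7 'uneven' high bits stored among the null bits. A sketch of that arithmetic for the columns in the comment's CREATE TABLE:

    #include <cstdio>

    int main() {
      // Mirrors Field_bit's constructor: field_length = len >> 3,
      // bit_len = len & 7 (the high bits kept in the null-bit area).
      unsigned lens[] = {17, 21, 8};
      for (unsigned len : lens)
        printf("BIT(%u): %u byte(s) in record, %u bit(s) among null bits\n",
               len, len >> 3, len & 7);
      // BIT(17): 2 bytes + 1 bit;  BIT(21): 2 bytes + 5 bits;
      // BIT(8):  1 byte  + 0 bits
      return 0;
    }

This is also why key_length() and pack_length() add (bit_len > 0): a key image needs one extra leading byte whenever there are uneven bits, matching create_length_to_internal_length() below, which reserves pack_length + test(length & 7).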
@@ -6047,15 +6499,22 @@ void create_field::create_length_to_internal_length(void)
case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VARCHAR:
length*= charset->mbmaxlen;
- key_length*= charset->mbmaxlen;
+ key_length= length;
pack_length= calc_pack_length(sql_type, length);
break;
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
+ /* Pack_length already calculated in sql_parse.cc */
length*= charset->mbmaxlen;
+ key_length= pack_length;
+ break;
+ case MYSQL_TYPE_BIT:
+ pack_length= calc_pack_length(sql_type, length);
+ /* We need one extra byte to store the bits we save among the null bits */
+ key_length= pack_length+ test(length & 7);
break;
default:
- /* do nothing */
+ key_length= pack_length= calc_pack_length(sql_type, length);
break;
}
}
@@ -6086,7 +6545,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_STRING:
case FIELD_TYPE_DECIMAL: return (length);
- case MYSQL_TYPE_VARCHAR: return (length+HA_KEY_BLOB_LENGTH);
+ case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 1: 2));
case FIELD_TYPE_YEAR:
case FIELD_TYPE_TINY : return 1;
case FIELD_TYPE_SHORT : return 2;
@@ -6108,6 +6567,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr;
case FIELD_TYPE_SET:
case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen
+ case FIELD_TYPE_BIT: return length / 8;
default: return 0;
}
return 0; // Keep compiler happy
@@ -6138,11 +6598,30 @@ Field *make_field(char *ptr, uint32 field_length,
const char *field_name,
struct st_table *table)
{
+ uchar *bit_ptr;
+ uchar bit_offset;
+ LINT_INIT(bit_ptr);
+ LINT_INIT(bit_offset);
+ if (field_type == FIELD_TYPE_BIT)
+ {
+ bit_ptr= null_pos;
+ bit_offset= null_bit;
+ if (f_maybe_null(pack_flag)) // if null field
+ {
+ bit_ptr+= (null_bit == 7); // shift bit_ptr and bit_offset
+ bit_offset= (bit_offset + 1) & 7;
+ }
+ }
+
if (!f_maybe_null(pack_flag))
{
null_pos=0;
null_bit=0;
}
+ else
+ {
+ null_bit= ((uchar) 1) << null_bit;
+ }
switch (field_type)
{
@@ -6166,7 +6645,9 @@ Field *make_field(char *ptr, uint32 field_length,
unireg_check, field_name, table,
field_charset);
if (field_type == MYSQL_TYPE_VARCHAR)
- return new Field_varstring(ptr,field_length,null_pos,null_bit,
+ return new Field_varstring(ptr,field_length,
+ HA_VARCHAR_PACKLENGTH(field_length),
+ null_pos,null_bit,
unireg_check, field_name, table,
field_charset);
return 0; // Error
@@ -6264,6 +6745,9 @@ Field *make_field(char *ptr, uint32 field_length,
unireg_check, field_name, table, field_charset);
case FIELD_TYPE_NULL:
return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset);
+ case FIELD_TYPE_BIT:
+ return new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
+ bit_offset, unireg_check, field_name, table);
default: // Impossible (Wrong version)
break;
}
@@ -6313,15 +6797,17 @@ create_field::create_field(Field *old_field,Field *orig_field)
case MYSQL_TYPE_SET:
case MYSQL_TYPE_VARCHAR:
case MYSQL_TYPE_VAR_STRING:
- /* These are corrected in create_length_to_internal_length */
+ /* This is corrected in create_length_to_internal_length */
length= (length+charset->mbmaxlen-1) / charset->mbmaxlen;
- key_length/= charset->mbmaxlen;
break;
#ifdef HAVE_SPATIAL
case FIELD_TYPE_GEOMETRY:
geom_type= ((Field_geom*)old_field)->geom_type;
break;
#endif
+ case FIELD_TYPE_BIT:
+ length= ((Field_bit *) old_field)->bit_len + length * 8;
+ break;
default:
break;
}
diff --git a/sql/field.h b/sql/field.h
index 4353780f9a4..6ce5cf2a526 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -80,7 +80,7 @@ public:
FIELD_CAST_TIMESTAMP, FIELD_CAST_YEAR, FIELD_CAST_DATE, FIELD_CAST_NEWDATE,
FIELD_CAST_TIME, FIELD_CAST_DATETIME,
FIELD_CAST_STRING, FIELD_CAST_VARSTRING, FIELD_CAST_BLOB,
- FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET
+ FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET, FIELD_CAST_BIT
};
utype unireg_check;
@@ -96,7 +96,7 @@ public:
virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0;
virtual int store(double nr)=0;
virtual int store(longlong nr)=0;
- virtual void store_time(TIME *ltime,timestamp_type t_type);
+ virtual int store_time(TIME *ltime, timestamp_type t_type);
virtual double val_real(void)=0;
virtual longlong val_int(void)=0;
inline String *val_str(String *str) { return val_str(str, str); }
@@ -113,9 +113,14 @@ public:
This trickery is used to decrease a number of malloc calls.
*/
virtual String *val_str(String*,String *)=0;
+ String *val_int_as_str(String *val_buffer, my_bool unsigned_flag);
virtual Item_result result_type () const=0;
virtual Item_result cmp_type () const { return result_type(); }
- bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; }
+ bool eq(Field *field)
+ {
+ return (ptr == field->ptr && null_ptr == field->null_ptr &&
+ null_bit == field->null_bit);
+ }
virtual bool eq_def(Field *field);
virtual uint32 pack_length() const { return (uint32) field_length; }
virtual void reset(void) { bzero(ptr,pack_length()); }
@@ -139,10 +144,9 @@ public:
virtual int cmp(const char *,const char *)=0;
virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L)
{ return memcmp(a,b,pack_length()); }
- virtual int cmp_offset(uint row_offset)
- { return memcmp(ptr,ptr+row_offset,pack_length()); }
- virtual int cmp_binary_offset(uint row_offset)
- { return memcmp(ptr,ptr+row_offset,pack_length()); }
+ int cmp_offset(uint row_offset) { return cmp(ptr,ptr+row_offset); }
+ int cmp_binary_offset(uint row_offset)
+ { return cmp_binary(ptr, ptr+row_offset); };
virtual int key_cmp(const byte *a,const byte *b)
{ return cmp((char*) a,(char*) b); }
virtual int key_cmp(const byte *str, uint length)
@@ -185,7 +189,10 @@ public:
virtual bool can_be_compared_as_longlong() const { return FALSE; }
virtual void free() {}
virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table);
- inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
+ virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit);
+ virtual void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
}
@@ -211,6 +218,15 @@ public:
ptr-=row_offset;
return tmp;
}
+
+ inline String *val_str(String *str, char *new_ptr)
+ {
+ char *old_ptr= ptr;
+ ptr= new_ptr;
+ val_str(str);
+ ptr= old_ptr;
+ return str;
+ }
virtual bool send_binary(Protocol *protocol);
virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0)
{
@@ -782,7 +798,7 @@ public:
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr);
- void store_time(TIME *ltime,timestamp_type type);
+ int store_time(TIME *ltime, timestamp_type type);
void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; }
double val_real(void);
longlong val_int(void);
@@ -815,6 +831,7 @@ public:
enum_field_types type() const { return FIELD_TYPE_TIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; }
enum Item_result cmp_type () const { return INT_RESULT; }
+ int store_time(TIME *ltime, timestamp_type type);
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr);
@@ -855,7 +872,7 @@ public:
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr);
- void store_time(TIME *ltime,timestamp_type type);
+ int store_time(TIME *ltime, timestamp_type type);
void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; }
double val_real(void);
longlong val_int(void);
@@ -925,26 +942,31 @@ public:
class Field_varstring :public Field_str {
public:
- Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg,
+ /* Store number of bytes used to store length (1 or 2) */
+ uint32 length_bytes;
+ Field_varstring(char *ptr_arg,
+ uint32 len_arg, uint length_bytes_arg,
+ uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, table_arg, cs),
+ length_bytes(length_bytes_arg)
{}
Field_varstring(uint32 len_arg,bool maybe_null_arg,
const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
:Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg, cs)
+ NONE, field_name_arg, table_arg, cs),
+ length_bytes(len_arg < 256 ? 1 : 2)
{}
enum_field_types type() const { return MYSQL_TYPE_VARCHAR; }
- enum ha_base_keytype key_type() const
- { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; }
+ enum ha_base_keytype key_type() const;
bool zero_pack() const { return 0; }
- void reset(void) { bzero(ptr,field_length+2); }
- uint32 pack_length() const { return (uint32) field_length+2; }
+ void reset(void) { bzero(ptr,field_length+length_bytes); }
+ uint32 pack_length() const { return (uint32) field_length+length_bytes; }
uint32 key_length() const { return (uint32) field_length; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(longlong nr);
@@ -959,12 +981,13 @@ public:
void sql_type(String &str) const;
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
char *pack_key(char *to, const char *from, uint max_length);
+ char *pack_key_from_key_image(char* to, const char *from, uint max_length);
const char *unpack(char* to, const char *from);
+ const char *unpack_key(char* to, const char *from, uint max_length);
int pack_cmp(const char *a, const char *b, uint key_length,
my_bool insert_or_update);
int pack_cmp(const char *b, uint key_length,my_bool insert_or_update);
int cmp_binary(const char *a,const char *b, uint32 max_length=~0L);
- int cmp_binary_offset(uint row_offset);
int key_cmp(const byte *,const byte*);
int key_cmp(const byte *str, uint length);
uint packed_col_length(const char *to, uint length);
@@ -974,6 +997,10 @@ public:
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; }
+ Field *new_field(MEM_ROOT *root, struct st_table *new_table);
+ Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit);
};
@@ -996,7 +1023,7 @@ public:
}
enum_field_types type() const { return FIELD_TYPE_BLOB;}
enum ha_base_keytype key_type() const
- { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; }
+ { return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr);
@@ -1005,9 +1032,7 @@ public:
String *val_str(String*,String *);
int cmp(const char *,const char*);
int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length);
- int cmp_offset(uint offset);
int cmp_binary(const char *a,const char *b, uint32 max_length=~0L);
- int cmp_binary_offset(uint row_offset);
int key_cmp(const byte *,const byte*);
int key_cmp(const byte *str, uint length);
uint32 key_length() const { return 0; }
@@ -1054,9 +1079,9 @@ public:
return 0;
}
char *pack(char *to, const char *from, uint max_length= ~(uint) 0);
- const char *unpack(char *to, const char *from);
char *pack_key(char *to, const char *from, uint max_length);
char *pack_key_from_key_image(char* to, const char *from, uint max_length);
+ const char *unpack(char *to, const char *from);
const char *unpack_key(char* to, const char *from, uint max_length);
int pack_cmp(const char *a, const char *b, uint key_length,
my_bool insert_or_update);
@@ -1091,7 +1116,7 @@ public:
:Field_blob(len_arg, maybe_null_arg, field_name_arg,
table_arg, &my_charset_bin)
{ geom_type= geom_type_arg; }
- enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; }
+ enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; }
enum_field_types type() const { return FIELD_TYPE_GEOMETRY; }
void sql_type(String &str) const;
int store(const char *to, uint length, CHARSET_INFO *charset);
@@ -1172,6 +1197,52 @@ public:
};
+class Field_bit :public Field {
+public:
+ uchar *bit_ptr; // position in record where 'uneven' bits are stored
+ uchar bit_ofs; // offset to 'uneven' high bits
+ uint bit_len; // number of 'uneven' high bits
+ Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ struct st_table *table_arg);
+ enum_field_types type() const { return FIELD_TYPE_BIT; }
+ enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
+ uint32 key_length() const { return (uint32) field_length + (bit_len > 0); }
+ uint32 max_length() { return (uint32) field_length + (bit_len > 0); }
+ uint size_of() const { return sizeof(*this); }
+ Item_result result_type () const { return INT_RESULT; }
+ void make_field(Send_field *);
+ void reset(void) { bzero(ptr, field_length); }
+ int store(const char *to, uint length, CHARSET_INFO *charset);
+ int store(double nr);
+ int store(longlong nr);
+ double val_real(void);
+ longlong val_int(void);
+ String *val_str(String*, String *);
+ int cmp(const char *a, const char *b)
+ { return cmp_binary(a, b); }
+ int key_cmp(const byte *a, const byte *b)
+ { return cmp_binary(a, b); }
+ int key_cmp(const byte *str, uint length);
+ int cmp_offset(uint row_offset);
+ void get_key_image(char *buff, uint length, imagetype type);
+ void set_key_image(char *buff, uint length)
+ { Field_bit::store(buff, length, &my_charset_bin); }
+ void sort_string(char *buff, uint length)
+ { get_key_image(buff, length, itRAW); }
+ uint32 pack_length() const
+ { return (uint32) field_length + (bit_len > 0); }
+ void sql_type(String &str) const;
+ field_cast_enum field_cast_type() { return FIELD_CAST_BIT; }
+ char *pack(char *to, const char *from, uint max_length=~(uint) 0);
+ const char *unpack(char* to, const char *from);
+ Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ char *new_ptr, uchar *new_null_ptr,
+ uint new_null_bit);
+};
+
+
/*
Create field class for CREATE TABLE
*/
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index f6cc851639a..9fd4f0228b3 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -305,7 +305,8 @@ static void do_field_string(Copy_field *copy)
char buff[MAX_FIELD_WIDTH];
copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset());
copy->from_field->val_str(&copy->tmp);
- copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset());
+ copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),
+ copy->tmp.charset());
}
@@ -350,7 +351,23 @@ static void do_expand_string(Copy_field *copy)
copy->to_length-copy->from_length, ' ');
}
-static void do_varstring(Copy_field *copy)
+
+static void do_varstring1(Copy_field *copy)
+{
+ uint length= (uint) *(uchar*) copy->from_ptr;
+ if (length > copy->to_length- 1)
+ {
+ length=copy->to_length - 1;
+ if (current_thd->count_cuted_fields)
+ copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_DATA_TRUNCATED, 1);
+ }
+ *(uchar*) copy->to_ptr= (uchar) length;
+ memcpy(copy->to_ptr+1, copy->from_ptr + 1, length);
+}
+
+
+static void do_varstring2(Copy_field *copy)
{
uint length=uint2korr(copy->from_ptr);
if (length > copy->to_length- HA_KEY_BLOB_LENGTH)
@@ -485,6 +502,9 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
}
else
{
+ if (to->real_type() == FIELD_TYPE_BIT ||
+ from->real_type() == FIELD_TYPE_BIT)
+ return do_field_int;
// Check if identical fields
if (from->result_type() == STRING_RESULT)
{
@@ -505,9 +525,15 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
}
else if (to->charset() != from->charset())
return do_field_string;
- else if (to->real_type() == MYSQL_TYPE_VARCHAR && to_length !=
- from_length)
- return do_varstring;
+ else if (to->real_type() == MYSQL_TYPE_VARCHAR)
+ {
+ if (((Field_varstring*) to)->length_bytes !=
+ ((Field_varstring*) from)->length_bytes)
+ return do_field_string;
+ if (to_length != from_length)
+ return (((Field_varstring*) to)->length_bytes == 1 ?
+ do_varstring1 : do_varstring2);
+ }
else if (to_length < from_length)
return do_cut_string;
else if (to_length > from_length)
@@ -587,6 +613,12 @@ void field_conv(Field *to,Field *from)
char buff[MAX_FIELD_WIDTH];
String result(buff,sizeof(buff),from->charset());
from->val_str(&result);
+ /*
+ We use c_ptr_quick() here to make it easier if 'to' is a float/double,
+ as the conversion routines will make a copy if the result doesn't
+ end with \0. Can be replaced with .ptr() when we have our own
+ string->double conversion.
+ */
to->store(result.c_ptr_quick(),result.length(),from->charset());
}
else if (from->result_type() == REAL_RESULT)
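
The copy-function selection in get_copy_func() above is a function-pointer dispatch: once both fields are known to be VARCHAR, the prefix widths decide whether a cheap length-prefixed copy (do_varstring1/do_varstring2) is safe or whether the general do_field_string path is needed. A reduced sketch of that dispatch shape (the types are simplified stand-ins, not Copy_field):

    #include <cstring>
    #include <cstdint>

    struct CopyCtx { const uint8_t *from; uint8_t *to; size_t to_length; };
    typedef void (*copy_fn)(CopyCtx *);

    static void copy_varstring1(CopyCtx *c) {       // 1-byte length prefix
      size_t len = c->from[0];
      if (len > c->to_length - 1)                   // MySQL also raises a
        len = c->to_length - 1;                     // truncation warning here
      c->to[0] = static_cast<uint8_t>(len);
      memcpy(c->to + 1, c->from + 1, len);
    }

    static void copy_varstring2(CopyCtx *c) {       // 2-byte length prefix
      size_t len = c->from[0] | (c->from[1] << 8);
      if (len > c->to_length - 2)
        len = c->to_length - 2;
      c->to[0] = static_cast<uint8_t>(len & 0xff);
      c->to[1] = static_cast<uint8_t>(len >> 8);
      memcpy(c->to + 2, c->from + 2, len);
    }

    static copy_fn pick(unsigned from_bytes, unsigned to_bytes) {
      if (from_bytes != to_bytes)
        return nullptr;   // the patch falls back to do_field_string here
      return to_bytes == 1 ? copy_varstring1 : copy_varstring2;
    }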
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 6cb83624eff..322126ff47b 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -356,7 +356,8 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const
}
switch (table->key_info[idx].key_part[i].field->key_type()) {
case HA_KEYTYPE_TEXT:
- case HA_KEYTYPE_VARTEXT:
+ case HA_KEYTYPE_VARTEXT1:
+ case HA_KEYTYPE_VARTEXT2:
/*
As BDB stores only one copy of equal strings, we can't use key read
on these. Binary collations do support key read though.
@@ -391,6 +392,7 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key)
KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts;
uint key_length=new_key->size;
+ DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size);
for (; key_part != end && (int) key_length > 0; key_part++)
{
int cmp;
@@ -745,11 +747,11 @@ void ha_berkeley::unpack_row(char *record, DBT *row)
void ha_berkeley::unpack_key(char *record, DBT *key, uint index)
{
- KEY *key_info=table->key_info+index;
+ KEY *key_info= table->key_info+index;
KEY_PART_INFO *key_part= key_info->key_part,
- *end=key_part+key_info->key_parts;
+ *end= key_part+key_info->key_parts;
+ char *pos= (char*) key->data;
- char *pos=(char*) key->data;
for (; key_part != end; key_part++)
{
if (key_part->null_bit)
@@ -773,8 +775,10 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index)
/*
- Create a packed key from from a row
- This will never fail as the key buffer is pre allocated.
+ Create a packed key from a row. This key will be written as such
+ to the index tree.
+
+ This will never fail as the key buffer is pre-allocated.
*/
DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
@@ -820,7 +824,10 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
/*
- Create a packed key from from a MySQL unpacked key
+ Create a packed key from a MySQL unpacked key (like the one that is
+ sent from index_read())
+
+ This key is to be used to read a row
*/
DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
@@ -1457,7 +1464,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row,
int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status);
+ table->in_use->status_var.ha_read_key_count++;
DBUG_ENTER("index_read_idx");
current_row.flags=DB_DBT_REALLOC;
active_index=MAX_KEY;
@@ -1476,10 +1483,9 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
int error;
KEY *key_info= &table->key_info[active_index];
int do_prev= 0;
-
DBUG_ENTER("ha_berkeley::index_read");
- statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status);
+ table->in_use->status_var.ha_read_key_count++;
bzero((char*) &row,sizeof(row));
if (find_flag == HA_READ_BEFORE_KEY)
{
@@ -1679,6 +1685,7 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos)
pos+=key_part->field->packed_col_length((char*) pos,key_part->length);
to->size= (uint) (pos- (byte*) to->data);
}
+ DBUG_DUMP("key", (char*) to->data, to->size);
return to;
}
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
new file mode 100755
index 00000000000..3118833a47e
--- /dev/null
+++ b/sql/ha_federated.cc
@@ -0,0 +1,1722 @@
+/* Copyright (C) 2004 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+
+ MySQL Federated Storage Engine
+
+ ha_federated.cc - MySQL Federated Storage Engine
+ Patrick Galbraith and Brian Aker, 2004
+
+ This is a handler which uses a remote database as the data file, as
+ opposed to a handler like MyISAM, which uses .MYD files locally.
+
+ How this handler works
+ ----------------------------------
+  Normal database files are local and as such: you create a table called
+  'users', and a file such as 'users.MYD' is created. A handler reads,
+  inserts, deletes and updates data in this file. The data is stored in a
+  particular format, so to read, that data has to be parsed into fields;
+  to write, fields have to be stored in this format to write to this data
+  file.
+
+  With the MySQL Federated storage engine, there will be no local files for
+  each table's data (such as .MYD). A remote database will store the data
+  that would normally be in this file. This will necessitate the use of the
+  MySQL client API to read, delete, update and insert this data. The data
+  will have to be retrieved via an SQL call "SELECT * FROM users". Then, to
+  read this data, it will have to be fetched via mysql_fetch_row one row at
+  a time, then converted from the columns in this select into the format
+  that the handler expects.
+
+ The create table will simply create the .frm file, and within the
+ "CREATE TABLE" SQL, there SHALL be any of the following :
+
+ comment=scheme://username:password@hostname:port/database/tablename
+ comment=scheme://username@hostname/database/tablename
+ comment=scheme://username:password@hostname/database/tablename
+  comment=scheme://username@hostname:port/database/tablename
+
+ An example would be:
+
+ comment=mysql://username:password@hostname:port/database/tablename
+
+ ***IMPORTANT***
+
+  Only 'mysql://' is supported in this release.
+
+
+ This comment connection string is necessary for the handler to be
+ able to connect to the remote server.
+
+
+ The basic flow is this:
+
+  SQL calls issued locally ->
+ mysql handler API (data in handler format) ->
+ mysql client API (data converted to SQL calls) ->
+ remote database -> mysql client API ->
+ convert result sets (if any) to handler format ->
+ handler API -> results or rows affected to local
+
+ What this handler does and doesn't support
+ ------------------------------------------
+ * Tables MUST be created on the remote server prior to any action on those
+    tables via the handler, first version. IMPORTANT: IF you MUST use the
+    federated storage engine type on the REMOTE end, MAKE SURE [ :) ] that
+    the table you connect to IS NOT a table pointing BACK to your ORIGINAL
+    table! You know and have heard the screeching of audio feedback? You
+    know how, putting two mirrors in front of each other, the reflection
+    continues for eternity? Well, need I say more?!
+ * There will not be support for transactions.
+ * There is no way for the handler to know if the database on the remote end
+ has changed. The reason for this is that this database has to work like a
+ data file that would never be written to by anything other than the
+ database. The integrity of the data in the local table could be breached
+ if there was any change to the remote database.
+  * Support for SELECT, INSERT, UPDATE, DELETE and indexes.
+ * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls.
+  * Prepared statements will not be used in the first implementation; it
+    remains to be seen whether the limited subset of the client API for the
+ server supports this.
+ * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its
+ implementation.
+ * This will not work with the query cache.
+
+ Method calls
+
+ A two column table, with one record:
+
+ (SELECT)
+
+ "SELECT * FROM foo"
+ ha_federated::info
+ ha_federated::scan_time:
+ ha_federated::rnd_init: share->select_query SELECT * FROM foo
+ ha_federated::extra
+
+ <for every row of data retrieved>
+ ha_federated::rnd_next
+ ha_federated::convert_row_to_internal_format
+ ha_federated::rnd_next
+ </for every row of data retrieved>
+
+ ha_federated::rnd_end
+ ha_federated::extra
+ ha_federated::reset
+
+ (INSERT)
+
+ "INSERT INTO foo (id, ts) VALUES (2, now());"
+
+ ha_federated::write_row
+
+ <for every field/column>
+ ha_federated::quote_data
+ ha_federated::quote_data
+ </for every field/column>
+
+ ha_federated::reset
+
+ (UPDATE)
+
+ "UPDATE foo SET ts = now() WHERE id = 1;"
+
+ ha_federated::index_init
+ ha_federated::index_read
+ ha_federated::index_read_idx
+ ha_federated::quote_data
+ ha_federated::rnd_next
+ ha_federated::convert_row_to_internal_format
+ ha_federated::update_row
+
+ <quote 3 cols, new and old data>
+ <ha_federated::quote_data
+ <ha_federated::quote_data
+ <ha_federated::quote_data
+ <ha_federated::quote_data
+ <ha_federated::quote_data
+ <ha_federated::quote_data
+ </quote 3 cols, new and old data>
+
+ ha_federated::extra
+ ha_federated::extra
+ ha_federated::extra
+ ha_federated::external_lock
+ ha_federated::reset
+
+
+ How do I use this handler?
+ --------------------------
+ First of all, you need to build this storage engine:
+
+ ./configure --with-federated-storage-engine
+ make
+
+  Next, using this handler is very simple. You must
+ have two databases running, either both on the same host, or
+ on different hosts.
+
+  On the server that will be connecting to the remote
+  host (the client), you create your table as such:
+
+ CREATE TABLE test_table (
+ id int(20) NOT NULL auto_increment,
+ name varchar(32) NOT NULL default '',
+ other int(20) NOT NULL default '0',
+ PRIMARY KEY (id),
+ KEY name (name),
+ KEY other_key (other))
+ ENGINE="FEDERATED"
+ DEFAULT CHARSET=latin1
+ COMMENT='root@127.0.0.1:9306/federated/test_federated';
+
+ Notice the "COMMENT" and "ENGINE" field? This is where you
+ respectively set the engine type, "FEDERATED" and remote
+ host information, this being the database your 'client' database
+ will connect to and use as the "data file". Obviously, the remote
+ database is running on port 9306, so you want to start up your other
+ database so that it is indeed on port 9306, and your federated
+ database on a port other than that. In my setup, I use port 5554
+ for federated, and port 5555 for the remote.
+
+ Then, on the remote database:
+
+ CREATE TABLE test_table (
+ id int(20) NOT NULL auto_increment,
+ name varchar(32) NOT NULL default '',
+ other int(20) NOT NULL default '0',
+ PRIMARY KEY (id),
+ KEY name (name),
+ KEY other_key (other))
+ ENGINE="<NAME>" <-- whatever you want, or not specify
+ DEFAULT CHARSET=latin1 ;
+
+ This table is exactly the same (and must be exactly the same),
+ except that it is not using the federated handler and does
+ not need the URL.
+
+
+ How to see the handler in action
+ --------------------------------
+
+ When developing this handler, I compiled the federated database with
+ debugging:
+
+ ./configure --with-federated-storage-engine
+ --prefix=/home/mysql/mysql-build/federated/ --with-debug
+
+  Once compiled, I did a 'make install' (not for the purpose of installing
+  the binary, but to install all the files the binary expects to see in the
+  directory I specified in the build with --prefix,
+  "/home/mysql/mysql-build/federated").
+
+ Then, I started the remote server:
+
+ /usr/local/mysql/bin/mysqld_safe
+ --user=mysql --log=/tmp/mysqld.5555.log -P 5555
+
+ Then, I went back to the directory containing the newly compiled mysqld,
+  <builddir>/sql/, and started up gdb:
+
+ gdb ./mysqld
+
+  Then, within the (gdb) prompt:
+ (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug
+
+  Next, I opened several windows:
+
+ 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed
+ 2. Tail the SQL calls to the remote database: tail -f /tmp/mysqld.5555.log
+ 3. A window with a client open to the federated server on port 5554
+ 4. A window with a client open to the federated server on port 5555
+
+  I would create a table via the client connected to the remote server on
+  port 5555, and then via the client connected to the federated server on
+  port 5554. At this point,
+ I would run whatever queries I wanted to on the federated server,
+ just always remembering that whatever changes I wanted to make on
+ the table, or if I created new tables, that I would have to do that
+ on the remote server.
+
+  Another thing to check is 'show variables', to verify that you have
+  federated handler support:
+
+ show variables like '%federat%'
+
+ and:
+
+ show storage engines;
+
+ Both should display the federated storage handler.
+
+
+ Testing
+ -------
+
+  There is a test for the MySQL Federated Storage Handler in ./mysql-test/t,
+  federated.test. It starts both a slave and master database using
+ the same setup that the replication tests use, with the exception that
+ it turns off replication, and sets replication to ignore the test tables.
+ After ensuring that you actually do have support for the federated storage
+ handler, numerous queries/inserts/updates/deletes are run, many derived
+  from the MyISAM tests, plus some other tests which were meant to reveal
+ any issues that would be most likely to affect this handler. All tests
+ should work! ;)
+
+  To run these tests, go into ./mysql-test (based in the directory you
+  built the server in) and run:
+
+  ./mysql-test-run federated
+
+  If you want to run the test and have debug info:
+
+ ./mysql-test-run --debug federated
+
+ This will run the test in debug mode, and you can view the trace and
+  log files in the ./mysql-test/var/log directory:
+
+ ls -l mysql-test/var/log/
+ -rw-r--r-- 1 patg patg 17 4 Dec 12:27 current_test
+ -rw-r--r-- 1 patg patg 692 4 Dec 12:52 manager.log
+ -rw-rw---- 1 patg patg 21246 4 Dec 12:51 master-bin.000001
+ -rw-rw---- 1 patg patg 68 4 Dec 12:28 master-bin.index
+ -rw-r--r-- 1 patg patg 1620 4 Dec 12:51 master.err
+ -rw-rw---- 1 patg patg 23179 4 Dec 12:51 master.log
+ -rw-rw---- 1 patg patg 16696550 4 Dec 12:51 master.trace
+ -rw-r--r-- 1 patg patg 0 4 Dec 12:28 mysqltest-time
+ -rw-r--r-- 1 patg patg 2024051 4 Dec 12:51 mysqltest.trace
+ -rw-rw---- 1 patg patg 94992 4 Dec 12:51 slave-bin.000001
+ -rw-rw---- 1 patg patg 67 4 Dec 12:28 slave-bin.index
+ -rw-rw---- 1 patg patg 249 4 Dec 12:52 slave-relay-bin.000003
+ -rw-rw---- 1 patg patg 73 4 Dec 12:28 slave-relay-bin.index
+ -rw-r--r-- 1 patg patg 1349 4 Dec 12:51 slave.err
+ -rw-rw---- 1 patg patg 96206 4 Dec 12:52 slave.log
+ -rw-rw---- 1 patg patg 15706355 4 Dec 12:51 slave.trace
+ -rw-r--r-- 1 patg patg 0 4 Dec 12:51 warnings
+
+ Of course, again, you can tail the trace log:
+
+ tail -f mysql-test/var/log/master.trace |grep ha_fed
+
+ As well as the slave query log:
+
+ tail -f mysql-test/var/log/slave.log
+
+  Files that comprise the test suite
+ ---------------------------------
+ mysql-test/t/federated.test
+ mysql-test/r/federated.result
+ mysql-test/r/have_federated_db.require
+ mysql-test/include/have_federated_db.inc
+
+
+ Other tidbits
+ -------------
+
+ These were the files that were modified or created for this
+ Federated handler to work:
+
+ ./configure.in
+ ./sql/Makefile.am
+ ./config/ac_macros/ha_federated.m4
+ ./sql/handler.cc
+ ./sql/mysqld.cc
+ ./sql/set_var.cc
+ ./sql/field.h
+ ./sql/sql_string.h
+ ./mysql-test/mysql-test-run(.sh)
+ ./mysql-test/t/federated.test
+ ./mysql-test/r/federated.result
+ ./mysql-test/r/have_federated_db.require
+ ./mysql-test/include/have_federated_db.inc
+ ./sql/ha_federated.cc
+ ./sql/ha_federated.h
+
+*/
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include <mysql_priv.h>
+
+#ifdef HAVE_FEDERATED_DB
+#include "ha_federated.h"
+#define MAX_REMOTE_SIZE IO_SIZE
+/* Variables for federated share methods */
+static HASH federated_open_tables; // Hash used to track open tables
+pthread_mutex_t federated_mutex; // This is the mutex we use to init the hash
+static int federated_init= 0; // Variable for checking the init state of hash
+
+/*
+  Function we use in the creation of our hash to get the key.
+*/
+static byte* federated_get_key(FEDERATED_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+/*
+ Parse connection info from table->comment
+
+ SYNOPSIS
+ parse_url()
+ share pointer to FEDERATED share
+ table pointer to current TABLE class
+
+ DESCRIPTION
+ populates the share with information about the connection
+ to the remote database that will serve as the data source.
+ This string must be specified (currently) in the "comment" field,
+ listed in the CREATE TABLE statement.
+
+ This string MUST be in the format of any of these:
+
+    scheme://username:password@hostname:port/database/table
+    scheme://username@hostname/database/table
+    scheme://username@hostname:port/database/table
+    scheme://username:password@hostname/database/table
+
+ An Example:
+
+ mysql://joe:joespass@192.168.1.111:9308/federated/testtable
+
+ ***IMPORTANT***
+ Currently, only "mysql://" is supported.
+
+ 'password' and 'port' are both optional.
+
+ RETURN VALUE
+ 0 success
+ -1 failure, wrong string format
+
+*/
+int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag)
+{
+ DBUG_ENTER("ha_federated::parse_url");
+
+  // This either gets set below or remains 0 (the default).
+  share->port= 0;
+  uint error_num= table_create_flag ? ER_CANT_CREATE_TABLE : ER_CONNECT_TO_MASTER;
+
+ share->scheme= my_strdup(table->comment, MYF(0));
+
+
+ if (share->username= strstr(share->scheme, "://"))
+ {
+ share->scheme[share->username - share->scheme] = '\0';
+ if (strcmp(share->scheme, "mysql") != 0)
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("The federated handler currently only supports connecting\
+ to a MySQL database!!!\n"));
+ my_error(error_num, MYF(0),
+ "ERROR: federated handler only supports remote 'mysql://' database");
+ DBUG_RETURN(-1);
+ }
+ share->username+= 3;
+
+ if (share->hostname= strchr(share->username, '@'))
+ {
+ share->username[share->hostname - share->username]= '\0';
+ share->hostname++;
+
+ if (share->password= strchr(share->username, ':'))
+ {
+ share->username[share->password - share->username]= '\0';
+ share->password++;
+ // make sure there isn't an extra / or @
+ if (strchr(share->password, '/') || strchr(share->hostname, '@'))
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+        /*
+          Found that if the string is:
+            user:@hostname:port/database/table
+          then the password is a null string, so set it to NULL
+        */
+ if (share->password[0] == '\0')
+ share->password= NULL;
+ }
+
+ // make sure there isn't an extra / or @
+ if (strchr(share->username, '/') || strchr(share->hostname, '@'))
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+
+ if (share->database= strchr(share->hostname, '/'))
+ {
+ share->hostname[share->database - share->hostname]= '\0';
+ share->database++;
+
+ if (share->sport= strchr(share->hostname, ':'))
+ {
+ share->hostname[share->sport - share->hostname]= '\0';
+ share->sport++;
+ if (share->sport[0] == '\0')
+ share->sport= NULL;
+ else
+ share->port= atoi(share->sport);
+ }
+
+ if (share->table_base_name= strchr(share->database, '/'))
+ {
+ share->database[share->table_base_name - share->database]= '\0';
+ share->table_base_name++;
+ }
+ else
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+ }
+ else
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+ // make sure there's not an extra /
+ if (strchr(share->table_base_name, '/'))
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+ if (share->hostname[0] == '\0')
+ share->hostname= NULL;
+
+ DBUG_PRINT("ha_federated::parse_url",
+ ("scheme %s username %s password %s \
+ hostname %s port %d database %s tablename %s\n",
+ share->scheme, share->username, share->password, share->hostname,
+ share->port, share->database, share->table_base_name));
+ }
+ else
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+ }
+ else
+ {
+ DBUG_PRINT("ha_federated::parse_url",
+ ("this connection string is not in the correct format!!!\n"));
+ my_error(error_num, MYF(0),
+ "this connection string is not in the correct format!!!\n");
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
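+
+/*
+  A worked example of the decomposition above (illustrative only; the
+  values are hypothetical): given
+
+    comment=mysql://joe:joespass@192.168.1.111:9308/federated/testtable
+
+  a successful parse_url() leaves the share roughly as:
+
+    share->scheme           "mysql"
+    share->username         "joe"
+    share->password         "joespass"
+    share->hostname         "192.168.1.111"
+    share->sport            "9308"        (share->port == 9308)
+    share->database         "federated"
+    share->table_base_name  "testtable"
+*/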
+
+/*
+ Convert MySQL result set row to handler internal format
+
+ SYNOPSIS
+ convert_row_to_internal_format()
+ record Byte pointer to record
+ row MySQL result set row from fetchrow()
+
+ DESCRIPTION
+ This method simply iterates through a row returned via fetchrow with
+    values from a successful SELECT, and then stores each column's value
+ in the field object via the field object pointer (pointing to the table's
+ array of field object pointers). This is how the handler needs the data
+ to be stored to then return results back to the user
+
+ RETURN VALUE
+ 0 After fields have had field values stored from record
+ */
+uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
+{
+ unsigned long len;
+ int x= 0;
+ DBUG_ENTER("ha_federated::convert_row_to_internal_format");
+
+  // clear the record's null bitmap before storing the field values
+ memset(record, 0, table->null_bytes);
+
+ for (Field **field=table->field; *field ; field++, x++)
+ {
+ if (!row[x])
+ (*field)->set_null();
+ else
+ /*
+          store the value with a binary charset (my_charset_bin): testing
+          revealed that e.g. German text was not retrieved properly otherwise
+ */
+ (*field)->store(row[x], strlen(row[x]), &my_charset_bin);
+ }
+
+ DBUG_RETURN(0);
+}
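+
+/*
+  For illustration (hypothetical row): for the two column table used in the
+  header comment, a fetched MYSQL_ROW of {"2", "2004-12-04 12:27:00"} is
+  stored through field[0]->store() and field[1]->store(), while a NULL
+  column would instead trigger field[n]->set_null().
+*/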
+
+/*
+ SYNOPSIS
+ quote_data()
+ unquoted_string Pointer pointing to the value of a field
+ field MySQL Field pointer to field being checked for type
+
+ DESCRIPTION
+    Simple method that passes the field type to the method "type_quote"
+    to get a true/false value as to whether the value in unquoted_string
+    needs to be enclosed with quotes. This ensures that values in the final
+    SQL statement to be passed to the remote server will be quoted properly
+
+  RETURN VALUE
+    void       immediately, if the string doesn't need quoting
+    void       after prepending/appending quotes on each side of the value
+
+*/
+void ha_federated::quote_data(String *unquoted_string, Field *field )
+{
+ char escaped_string[IO_SIZE];
+
+ int quote_flag;
+ DBUG_ENTER("ha_federated::quote_data");
+ // this is the same call that mysql_real_escape_string() calls
+ escape_string_for_mysql(&my_charset_bin, (char *)escaped_string,
+ unquoted_string->c_ptr_quick(), unquoted_string->length());
+
+ DBUG_PRINT("ha_federated::quote_data",
+ ("escape_string_for_mysql unescaped %s escaped %s",
+ unquoted_string->c_ptr_quick(), escaped_string));
+
+ if (field->is_null())
+ {
+ DBUG_PRINT("ha_federated::quote_data",
+               ("NULL, no quoting needed for unquoted_string %s, returning.",
+ unquoted_string->c_ptr_quick()));
+ DBUG_VOID_RETURN;
+ }
+
+ quote_flag= type_quote(field->type());
+
+ if (quote_flag == 0)
+ {
+ DBUG_PRINT("ha_federated::quote_data",
+               ("quote flag 0, no quoting needed for unquoted_string %s, returning.",
+ unquoted_string->c_ptr_quick()));
+ DBUG_VOID_RETURN;
+ }
+ else
+ {
+ // reset string, then re-append with quotes and escaped values
+ unquoted_string->length(0);
+ unquoted_string->append("'");
+ unquoted_string->append((char *)escaped_string);
+ unquoted_string->append("'");
+ }
+ DBUG_PRINT("ha_federated::quote_data",
+ ("FINAL quote_flag %d unquoted_string %s escaped_string %s",
+ quote_flag, unquoted_string->c_ptr_quick(), escaped_string));
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Quote a field type if needed
+
+ SYNOPSIS
+ ha_federated::type_quote
+      int type     Enumerated field type number
+
+ DESCRIPTION
+ Simple method to give true/false whether a field should be quoted.
+ Used when constructing INSERT and UPDATE queries to the remote server
+ see write_row and update_row
+
+ RETURN VALUE
+ 0 if value is of type NOT needing quotes
+ 1 if value is of type needing quotes
+*/
+uint ha_federated::type_quote(int type)
+{
+ DBUG_ENTER("ha_federated::type_quote");
+ DBUG_PRINT("ha_federated::type_quote", ("field type %d", type));
+
+ switch(type) {
+ //FIX this is a bug, fix when kernel is fixed
+ case MYSQL_TYPE_VARCHAR :
+ case FIELD_TYPE_STRING :
+ case FIELD_TYPE_VAR_STRING :
+ case FIELD_TYPE_YEAR :
+ case FIELD_TYPE_NEWDATE :
+ case FIELD_TYPE_TIME :
+ case FIELD_TYPE_TIMESTAMP :
+ case FIELD_TYPE_DATE :
+ case FIELD_TYPE_DATETIME :
+ case FIELD_TYPE_TINY_BLOB :
+ case FIELD_TYPE_BLOB :
+ case FIELD_TYPE_MEDIUM_BLOB :
+ case FIELD_TYPE_LONG_BLOB :
+ case FIELD_TYPE_GEOMETRY :
+ DBUG_RETURN(1);
+
+ case FIELD_TYPE_DECIMAL :
+ case FIELD_TYPE_TINY :
+ case FIELD_TYPE_SHORT :
+ case FIELD_TYPE_INT24 :
+ case FIELD_TYPE_LONG :
+ case FIELD_TYPE_FLOAT :
+ case FIELD_TYPE_DOUBLE :
+ case FIELD_TYPE_LONGLONG :
+ case FIELD_TYPE_NULL :
+ case FIELD_TYPE_SET :
+ case FIELD_TYPE_ENUM :
+ DBUG_RETURN(0);
+
+ default: DBUG_RETURN(0);
+ }
+ DBUG_RETURN(0);
+}
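+
+/*
+  A small illustration of the two methods together (hypothetical values):
+  an INT column holding 42 has type_quote() return 0, so the value is sent
+  unchanged; a VARCHAR column holding ab'c has type_quote() return 1, so
+  quote_data() rewrites the value to 'ab\'c' before it is appended to the
+  statement sent to the remote server.
+*/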
+
+int load_conn_info(FEDERATED_SHARE *share, TABLE *table)
+{
+ DBUG_ENTER("ha_federated::load_conn_info");
+ int retcode;
+
+ retcode= parse_url(share, table, 0);
+
+ if (retcode < 0)
+ {
+ DBUG_PRINT("ha_federated::load_conn_info",
+ ("retcode %d, setting defaults", retcode));
+ /* sanity checks to make sure all needed pieces are present */
+ if (!share->port)
+ {
+ if (strcmp(share->hostname, "localhost") == 0)
+ share->socket= my_strdup("/tmp/mysql.sock",MYF(0));
+ else
+ share->port= 3306;
+ }
+ }
+ DBUG_PRINT("ha_federated::load_conn_info",
+ ("returned from retcode %d", retcode));
+
+ DBUG_RETURN(retcode);
+}
+
+/*
+ Example of simple lock controls. The "share" it creates is structure we will
+ pass to each federated handler. Do you have to have one of these? Well, you
+ have pieces that are used for locking, and they are needed to function.
+*/
+static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ FEDERATED_SHARE *share;
+ // FIX : need to redo
+ //String query;
+ char query_buffer[IO_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ query.length(0);
+
+ uint table_name_length, table_base_name_length;
+ char *tmp_table_name, *tmp_table_base_name, *table_base_name, *select_query;
+
+ // share->table_name has the file location - we want the actual table's
+ // name!
+ table_base_name= table->table_name;
+ DBUG_PRINT("ha_federated::get_share",("table_name %s", table_base_name));
+ /*
+ So why does this exist? There is no way currently to init a storage engine.
+ Innodb and BDB both have modifications to the server to allow them to
+ do this. Since you will not want to do this, this is probably the next
+ best method.
+ */
+ if (!federated_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!federated_init)
+ {
+ federated_init++;
+ VOID(pthread_mutex_init(&federated_mutex,MY_MUTEX_INIT_FAST));
+ (void) hash_init(&federated_open_tables,system_charset_info,32,0,0,
+ (hash_get_key) federated_get_key,0,0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&federated_mutex);
+ table_name_length= (uint) strlen(table_name);
+ table_base_name_length= (uint) strlen(table_base_name);
+
+ if (!(share= (FEDERATED_SHARE*) hash_search(&federated_open_tables,
+ (byte*) table_name,
+ table_name_length)))
+ {
+ query.set_charset(system_charset_info);
+ query.append("SELECT * FROM ");
+ query.append(table_base_name);
+
+ if (!(share= (FEDERATED_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_table_name, table_name_length+1,
+ &tmp_table_base_name, table_base_name_length+1,
+ &select_query, query.length()+1,
+ NullS)))
+ {
+ pthread_mutex_unlock(&federated_mutex);
+ return NULL;
+ }
+
+ load_conn_info(share, table);
+ share->use_count= 0;
+ share->table_name_length= table_name_length;
+ share->table_name= tmp_table_name;
+ share->table_base_name_length= table_base_name_length;
+ share->table_base_name= tmp_table_base_name;
+ share->select_query= select_query;
+ strmov(share->table_name,table_name);
+ strmov(share->table_base_name,table_base_name);
+ strmov(share->select_query,query.c_ptr_quick());
+ DBUG_PRINT("ha_federated::get_share",("share->select_query %s", share->select_query));
+ if (my_hash_insert(&federated_open_tables, (byte*) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&federated_mutex);
+
+ return share;
+
+error:
+ pthread_mutex_unlock(&federated_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls. We call this whenever we close a table.
+ If the table had the last reference to the share then we
+ free memory associated with it.
+*/
+static int free_share(FEDERATED_SHARE *share)
+{
+ pthread_mutex_lock(&federated_mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&federated_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&federated_mutex);
+
+ return 0;
+}
+
+
+/*
+  If frm_error() is called then we will use this to find out
+  what file extensions exist for the storage engine. This is
+ also used by the default rename_table and delete_table method
+ in handler.cc.
+*/
+const char **ha_federated::bas_ext() const
+{ static const char *ext[]= { NullS }; return ext; }
+
+
+/*
+ Used for opening tables. The name will be the name of the file.
+ A table is opened when it needs to be opened. For instance
+ when a request comes in for a select on the table (tables are not
+ open and closed for each request, they are cached).
+
+ Called from handler.cc by handler::ha_open(). The server opens
+ all tables by calling ha_open() which then calls the handler
+ specific open().
+*/
+int ha_federated::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_federated::open");
+
+ if (!(share= get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+
+ /* Connect to remote database mysql_real_connect() */
+ mysql= mysql_init(0);
+ DBUG_PRINT("ha_federated::open",("hostname %s", share->hostname));
+ DBUG_PRINT("ha_federated::open",("username %s", share->username));
+ DBUG_PRINT("ha_federated::open",("password %s", share->password));
+ DBUG_PRINT("ha_federated::open",("database %s", share->database));
+ DBUG_PRINT("ha_federated::open",("port %d", share->port));
+ if (!mysql_real_connect(mysql,
+ share->hostname,
+ share->username,
+ share->password,
+ share->database,
+ share->port,
+ NULL,
+ 0))
+ {
+ my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql));
+ DBUG_RETURN(ER_CONNECT_TO_MASTER);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Closes a table. We call the free_share() function to free any resources
+ that we have allocated in the "shared" structure.
+
+ Called from sql_base.cc, sql_select.cc, and table.cc.
+ In sql_select.cc it is only used to close up temporary tables or during
+ the process where a temporary table is converted over to being a
+ myisam table.
+ For sql_base.cc look at close_data_tables().
+*/
+int ha_federated::close(void)
+{
+ DBUG_ENTER("ha_federated::close");
+ /* Disconnect from mysql */
+ mysql_close(mysql);
+ DBUG_RETURN(free_share(share));
+
+}
+
+/*
+
+ Checks if a field in a record is SQL NULL.
+
+ SYNOPSIS
+ field_in_record_is_null()
+ table TABLE pointer, MySQL table object
+ field Field pointer, MySQL field object
+ record char pointer, contains record
+
+ DESCRIPTION
+ This method uses the record format information in table to track
+ the null bit in record.
+
+ RETURN VALUE
+ 1 if NULL
+ 0 otherwise
+*/
+inline uint field_in_record_is_null (
+ TABLE* table, /* in: MySQL table object */
+ Field* field, /* in: MySQL field object */
+ char* record) /* in: a row in MySQL format */
+{
+ int null_offset;
+ DBUG_ENTER("ha_federated::field_in_record_is_null");
+
+ if (!field->null_ptr)
+ DBUG_RETURN(0);
+
+ null_offset= (uint) ((char*) field->null_ptr - (char*) table->record[0]);
+
+ if (record[null_offset] & field->null_bit)
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
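+
+/*
+  Sketch of the arithmetic above (hypothetical layout): if field->null_ptr
+  points at the second byte of table->record[0] and field->null_bit is 0x02,
+  then null_offset is 1 and the field is NULL in 'record' exactly when
+  (record[1] & 0x02) is set.
+*/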
+
+/*
+  write_row() inserts a row. No extra() hint is given currently if a bulk load
+  is happening. buf is a byte array of data. You can use the field
+  information to extract the data from the native byte array type.
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
+
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+*/
+int ha_federated::write_row(byte * buf)
+{
+ int x= 0, num_fields= 0;
+ ulong current_query_id= 1;
+ ulong tmp_query_id;
+ int all_fields_have_same_query_id= 1;
+
+ char insert_buffer[IO_SIZE];
+ char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE];
+
+ // The main insert query string
+ String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
+ insert_string.length(0);
+ // The string containing the values to be added to the insert
+ String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
+ values_string.length(0);
+ // The actual value of the field, to be added to the values_string
+ String insert_field_value_string(insert_field_value_buffer,
+ sizeof(insert_field_value_buffer), &my_charset_bin);
+ insert_field_value_string.length(0);
+
+ DBUG_ENTER("ha_federated::write_row");
+  statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
+
+ /*
+ get the current query id - the fields that we add to the insert
+ statement to send to the remote will not be appended unless they match
+ this query id
+ */
+ current_query_id= table->in_use->query_id;
+ DBUG_PRINT("ha_federated::write_row", ("current query id %d",
+ current_query_id));
+
+ // start off our string
+ insert_string.append("INSERT INTO ");
+ insert_string.append(share->table_base_name);
+ // start both our field and field values strings
+ insert_string.append(" (");
+ values_string.append(" VALUES (");
+
+ /*
+    If even one field's query_id differs, all_fields_have_same_query_id is
+    set to 0. If it remains 1, then no fields were specified in the query,
+    such as in the case of INSERT INTO table VALUES (val1, val2, valN)
+ */
+ for (Field **field= table->field; *field ; field++, x++)
+ {
+ if (x > 0 && tmp_query_id != (*field)->query_id)
+ all_fields_have_same_query_id= 0;
+
+ tmp_query_id= (*field)->query_id;
+ }
+ /*
+ loop through the field pointer array, add any fields to both the values
+ list and the fields list that match the current query id
+ */
+  x= 0;                                  // restart the field counter
+  for (Field **field= table->field; *field ; field++, x++)
+ {
+ DBUG_PRINT("ha_federated::write_row", ("field type %d", (*field)->type()));
+ // if there is a query id and if it's equal to the current query id
+ if ( ((*field)->query_id && (*field)->query_id == current_query_id )
+ || all_fields_have_same_query_id)
+ {
+ num_fields++;
+
+ if ((*field)->is_null())
+ {
+ DBUG_PRINT("ha_federated::write_row",
+ ("current query id %d field is_null query id %d",
+ current_query_id, (*field)->query_id));
+ insert_field_value_string.append("NULL");
+ }
+ else
+ {
+ DBUG_PRINT("ha_federated::write_row",
+ ("current query id %d field is not null query ID %d",
+ current_query_id, (*field)->query_id));
+ (*field)->val_str(&insert_field_value_string);
+ }
+ // append the field name
+ insert_string.append((*field)->field_name);
+
+ // quote these fields if they require it
+ quote_data(&insert_field_value_string, *field);
+ // append the value
+ values_string.append(insert_field_value_string);
+ insert_field_value_string.length(0);
+
+ // append commas between both fields and fieldnames
+ insert_string.append(',');
+ values_string.append(',');
+ DBUG_PRINT("ha_federated::write_row",
+ ("insert_string %s values_string %s insert_field_value_string %s",
+ insert_string.c_ptr_quick(), values_string.c_ptr_quick(), insert_field_value_string.c_ptr_quick()));
+
+ }
+ }
+
+ /*
+    chop off the trailing comma, or if there were no fields, a '('
+ So, "INSERT INTO foo (" becomes "INSERT INTO foo "
+ or, with fields, "INSERT INTO foo (field1, field2," becomes
+ "INSERT INTO foo (field1, field2"
+ */
+ insert_string.chop();
+
+
+ /*
+ if there were no fields, we don't want to add a closing paren
+ AND, we don't want to chop off the last char '('
+ insert will be "INSERT INTO t1 VALUES ();"
+ */
+ DBUG_PRINT("ha_federated::write_row",("x %d num fields %d",
+ x, num_fields));
+ if (num_fields > 0)
+ {
+    // chop off the trailing comma
+ values_string.chop();
+ insert_string.append(')');
+ }
+ // we always want to append this, even if there aren't any fields
+ values_string.append(')');
+
+ // add the values
+ insert_string.append(values_string);
+
+ DBUG_PRINT("ha_federated::write_row",("insert query %s",
+ insert_string.c_ptr_quick()));
+
+ if (mysql_real_query(mysql, insert_string.c_ptr_quick(),
+ insert_string.length()))
+ {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+
+ DBUG_RETURN(0);
+}
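+
+/*
+  Example of the string this method builds (hypothetical data): for the two
+  column table in the header comment, the local statement
+
+    INSERT INTO foo (id, ts) VALUES (2, now());
+
+  makes write_row() send something like
+
+    INSERT INTO foo (id,ts) VALUES (2,'2004-12-04 12:27:00')
+
+  to the remote server; note the quotes added by quote_data() around the
+  timestamp value but not around the integer.
+*/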
+
+/*
+ Yes, update_row() does what you expect, it updates a row. old_data will have
+ the previous row record in it, while new_data will have the newest data in
+ it.
+
+ Keep in mind that the server can do updates based on ordering if an ORDER BY
+  clause was used. Consecutive ordering is not guaranteed.
+  Currently new_data will not have an updated auto_increment record, or
+  an updated timestamp field. You can do these for federated by doing:
+ if (table->timestamp_on_update_now)
+ update_timestamp(new_row+table->timestamp_on_update_now-1);
+ if (table->next_number_field && record == table->record[0])
+ update_auto_increment();
+
+ Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+*/
+int ha_federated::update_row(
+ const byte * old_data,
+ byte * new_data
+ )
+{
+ uint x= 0;
+ uint has_a_primary_key;
+ int primary_key_field_num;
+ char old_field_value_buffer[IO_SIZE], new_field_value_buffer[IO_SIZE];
+ char update_buffer[IO_SIZE], where_buffer[IO_SIZE];
+
+  // stores the old value of the field we are updating
+ String old_field_value(old_field_value_buffer, sizeof(old_field_value_buffer), &my_charset_bin);
+ old_field_value.length(0);
+ // stores the new value of the field
+ String new_field_value(new_field_value_buffer, sizeof(new_field_value_buffer), &my_charset_bin);
+ new_field_value.length(0);
+ // stores the update query
+ String update_string(update_buffer, sizeof(update_buffer), &my_charset_bin);
+ update_string.length(0);
+ // stores the WHERE clause
+ String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin);
+ where_string.length(0);
+
+ DBUG_ENTER("ha_federated::update_row");
+
+
+  has_a_primary_key= (table->primary_key != MAX_KEY);
+ primary_key_field_num= has_a_primary_key ?
+ table->key_info[table->primary_key].key_part->fieldnr -1 : -1;
+ if (has_a_primary_key)
+ DBUG_PRINT("ha_federated::update_row", ("has a primary key"));
+
+ update_string.append("UPDATE ");
+ update_string.append(share->table_base_name);
+ update_string.append(" SET ");
+
+  /*
+    In this loop, we want to match column names to the values being set
+    (while building the UPDATE statement).
+
+    Iterate through table->field (new data) and old_data using the same
+    index to create an SQL UPDATE statement; new data is used to create
+    SET field=value and old data is used to create WHERE field=oldvalue
+  */
+
+ for (Field **field= table->field ; *field ; field++, x++)
+ {
+ /*
+ In all of these tests for 'has_a_primary_key', what I'm trying to
+ accomplish is to only use the primary key in the WHERE clause if the
+ table has a primary key, as opposed to a table without a primary key
+ in which case we have to use all the fields to create a WHERE clause
+ using the old/current values, as well as adding a LIMIT statement
+ */
+ if (has_a_primary_key)
+ {
+ if (x == primary_key_field_num)
+ where_string.append((*field)->field_name);
+ }
+ else
+ where_string.append((*field)->field_name);
+
+ update_string.append((*field)->field_name);
+ update_string.append('=');
+
+ if ((*field)->is_null())
+ new_field_value.append("NULL");
+ else
+ {
+ // otherwise =
+ (*field)->val_str(&new_field_value);
+ quote_data(&new_field_value, *field);
+
+ if ( has_a_primary_key )
+ {
+ if (x == primary_key_field_num)
+ where_string.append("=");
+ }
+ else
+ if (! field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append("=");
+ }
+
+ if ( has_a_primary_key)
+ {
+ if (x == primary_key_field_num)
+ {
+ (*field)->val_str(&old_field_value,
+ (char *)(old_data + (*field)->offset()));
+ quote_data(&old_field_value, *field);
+ where_string.append(old_field_value);
+ }
+ }
+ else
+ {
+ if (field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append(" IS NULL ");
+ else
+ {
+ (*field)->val_str(&old_field_value,
+ (char *)(old_data + (*field)->offset()));
+ quote_data(&old_field_value, *field);
+ where_string.append(old_field_value);
+ }
+ }
+ update_string.append(new_field_value);
+ new_field_value.length(0);
+
+ if (x+1 < table->fields)
+ {
+ update_string.append(", ");
+ if (! has_a_primary_key)
+ where_string.append(" AND ");
+ }
+ old_field_value.length(0);
+ }
+ update_string.append(" WHERE ");
+ update_string.append(where_string.c_ptr_quick());
+ if (! has_a_primary_key)
+ update_string.append(" LIMIT 1");
+
+ DBUG_PRINT("ha_federated::update_row", ("Final update query: %s",
+ update_string.c_ptr_quick()));
+ if (mysql_real_query(mysql, update_string.c_ptr_quick(),
+ update_string.length()))
+ {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+
+
+ DBUG_RETURN(0);
+}
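+
+/*
+  Example of the generated statement (hypothetical data): with a primary
+  key on id, the local statement
+
+    UPDATE foo SET ts = now() WHERE id = 1;
+
+  becomes something like
+
+    UPDATE foo SET id=1, ts='2004-12-04 12:27:00' WHERE id=1
+
+  whereas without a primary key every column appears in the WHERE clause,
+  joined with AND, and " LIMIT 1" is appended.
+*/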
+
+/*
+ This will delete a row. 'buf' will contain a copy of the row to be deleted.
+  The server will call this right after the current row has been read (from
+  either a previous rnd_next() or index call).
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
+  Keep in mind that the server does not guarantee consecutive deletions.
+ ORDER BY clauses can be used.
+
+ Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+ Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+ it is used for removing duplicates while in insert it is used for REPLACE
+ calls.
+*/
+int ha_federated::delete_row(const byte * buf)
+{
+ int x= 0;
+ char delete_buffer[IO_SIZE];
+ char data_buffer[IO_SIZE];
+
+ String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
+ delete_string.length(0);
+ String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+ data_string.length(0);
+
+ DBUG_ENTER("ha_federated::delete_row");
+
+ delete_string.append("DELETE FROM ");
+ delete_string.append(share->table_base_name);
+ delete_string.append(" WHERE ");
+
+ for (Field **field= table->field; *field; field++, x++)
+ {
+ delete_string.append((*field)->field_name);
+
+ if ((*field)->is_null())
+ {
+ delete_string.append(" IS ");
+ data_string.append("NULL");
+ }
+ else
+ {
+ delete_string.append("=");
+ (*field)->val_str(&data_string);
+ quote_data(&data_string, *field);
+ }
+
+ delete_string.append(data_string);
+ data_string.length(0);
+
+ if (x+1 < table->fields)
+ delete_string.append(" AND ");
+ }
+
+ delete_string.append(" LIMIT 1");
+ DBUG_PRINT("ha_federated::delete_row",
+ ("Delete sql: %s", delete_string.c_ptr_quick()));
+ if ( mysql_real_query(mysql, delete_string.c_ptr_quick(),
+ delete_string.length()))
+ {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+
+ DBUG_RETURN(0);
+}
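+
+/*
+  Example of the generated statement (hypothetical data): deleting the row
+  (1, 'foo', 0) from the test_table shown in the header comment sends
+
+    DELETE FROM test_table WHERE id=1 AND name='foo' AND other=0 LIMIT 1
+
+  to the remote server; a NULL column appears as "col IS NULL" instead.
+*/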
+
+
+/*
+ Positions an index cursor to the index specified in the handle. Fetches the
+ row if available. If the key value is null, begin at the first key of the
+ index. This method, which is called in the case of an SQL statement having
+ a WHERE clause on a non-primary key index, simply calls index_read_idx.
+*/
+int ha_federated::index_read(byte * buf, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_federated::index_read");
+ DBUG_RETURN(index_read_idx(buf, active_index, key, key_len, find_flag));
+}
+
+
+/*
+ Positions an index cursor to the index specified in key. Fetches the
+ row if any. This is only used to read whole keys.
+
+ This method is called via index_read in the case of a WHERE clause using
+ a regular non-primary key index, OR is called DIRECTLY when the WHERE clause
+ uses a PRIMARY KEY index.
+*/
+int ha_federated::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ char index_value[IO_SIZE];
+ String index_string(index_value, sizeof(index_value), &my_charset_bin);
+ index_string.length(0);
+
+ char sql_query_buffer[IO_SIZE];
+ String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin);
+ sql_query.length(0);
+
+ DBUG_ENTER("ha_federated::index_read_idx");
+ statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status);
+
+
+ sql_query.append(share->select_query);
+ sql_query.append(" WHERE ");
+ sql_query.append(table->key_info[index].key_part->field->field_name);
+ sql_query.append(" = ");
+
+ table->key_info[index].key_part->field->val_str(&index_string, (char *)(key));
+ quote_data(&index_string, table->key_info[index].key_part->field);
+ sql_query.append(index_string);
+
+ DBUG_PRINT("ha_federated::index_read_idx",
+ ("sql_query %s", sql_query.c_ptr_quick()));
+
+ if (mysql_real_query(mysql, sql_query.c_ptr_quick(), sql_query.length()))
+ {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+ result= mysql_store_result(mysql);
+
+ if (!result)
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+
+ if (mysql_errno(mysql))
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(mysql_errno(mysql));
+ }
+
+ DBUG_RETURN(rnd_next(buf));
+}
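+
+/*
+  Example of the generated statement (hypothetical data): a local
+
+    SELECT * FROM test_table WHERE id = 1;
+
+  on the primary key makes this method send
+
+    SELECT * FROM test_table WHERE id = 1
+
+  to the remote server and then hand the stored result to rnd_next().
+*/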
+
+/*
+ Initialized at each key walk (called multiple times unlike ::rnd_init())
+*/
+int ha_federated::index_init(uint keynr)
+{
+ DBUG_ENTER("ha_federated::index_init");
+ DBUG_PRINT("ha_federated::index_init",
+ ("table: '%s' key: %d", table->real_name, keynr));
+ active_index= keynr;
+ DBUG_RETURN(0);
+}
+
+/*
+ Used to read forward through the index.
+*/
+int ha_federated::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_federated::index_next");
+ DBUG_RETURN(rnd_next(buf));
+}
+
+
+/*
+ rnd_init() is called when the system wants the storage engine to do a table
+ scan.
+
+ This is the method that gets data for the SELECT calls.
+
+  See the "Method calls" section in the introduction at the top of this
+  file to see when rnd_init() is called.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+int ha_federated::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_federated::rnd_init");
+
+ DBUG_PRINT("ha_federated::rnd_init",
+ ("share->select_query %s", share->select_query));
+ if (mysql_real_query(mysql, share->select_query, strlen(share->select_query)))
+ {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+ result= mysql_store_result(mysql);
+
+ if (mysql_errno(mysql))
+ DBUG_RETURN(mysql_errno(mysql));
+ DBUG_RETURN(0);
+}
+
+int ha_federated::rnd_end()
+{
+ DBUG_ENTER("ha_federated::rnd_end");
+ mysql_free_result(result);
+ DBUG_RETURN(index_end());
+}
+
+int ha_federated::index_end(void)
+{
+ DBUG_ENTER("ha_federated::index_end");
+ active_index= MAX_KEY;
+ DBUG_RETURN(0);
+}
+
+/*
+ This is called for each row of the table scan. When you run out of records
+  you should return HA_ERR_END_OF_FILE. Fill buf up with the row information.
+ The Field structure for the table is the key to getting data into buf
+ in a manner that will allow the server to understand it.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+int ha_federated::rnd_next(byte *buf)
+{
+ MYSQL_ROW row;
+ DBUG_ENTER("ha_federated::rnd_next");
+
+  // Fetch a row and convert it to the handler's internal row format.
+ current_position= result->data_cursor;
+ if (! (row= mysql_fetch_row(result)))
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ DBUG_RETURN(convert_row_to_internal_format(buf,row));
+}
+
+
+/*
+ 'position()' is called after each call to rnd_next() if the data needs to be
+ ordered. You can do something like the following to store the position:
+ ha_store_ptr(ref, ref_length, current_position);
+
+ The server uses ref to store data. ref_length in the above case is the size
+ needed to store current_position. ref is just a byte array that the server
+ will maintain. If you are using offsets to mark rows, then current_position
+ should be the offset. If it is a primary key like in BDB, then it needs to
+ be a primary key.
+
+ Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+*/
+void ha_federated::position(const byte *record)
+{
+ DBUG_ENTER("ha_federated::position");
+  // save the current data cursor position as this row's "position"
+ ha_store_ptr(ref, ref_length, current_position);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This is like rnd_next, but you are given a position to use to determine the
+ row. The position will be of the type that you stored in ref. You can use
+ ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved
+ when position() was called.
+
+ This method is required for an ORDER BY.
+
+ Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
+*/
+int ha_federated::rnd_pos(byte * buf, byte *pos)
+{
+ DBUG_ENTER("ha_federated::rnd_pos");
+ statistic_increment(table->in_use->status_var.ha_read_rnd_count,&LOCK_status);
+ current_position= ha_get_ptr(pos,ref_length);
+ result->current_row= 0;
+ result->data_cursor= current_position;
+ DBUG_RETURN(rnd_next(buf));
+}
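+
+/*
+  Sketch of the round trip between position() and rnd_pos(): position()
+  saves result->data_cursor (a MYSQL_ROW_OFFSET) into 'ref' via
+  ha_store_ptr(); rnd_pos() reads it back with ha_get_ptr(), rewinds the
+  stored result to that offset and re-fetches the row through rnd_next().
+*/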
+
+
+/*
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data
+ Another note, you will probably want to have the following in your
+ code:
+ if (records < 2)
+ records = 2;
+ The reason is that the server will optimize for cases of only a single
+ record. If in a table scan you don't know the number of records
+ it will probably be better to set records to two so you can return
+ as many records as you need.
+ Along with records a few more variables you may wish to set are:
+ records
+ deleted
+ data_file_length
+ index_file_length
+ delete_length
+ check_time
+ Take a look at the public variables in handler.h for more information.
+
+ Called in:
+ filesort.cc
+ ha_heap.cc
+ item_sum.cc
+ opt_sum.cc
+ sql_delete.cc
+ sql_delete.cc
+ sql_derived.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_table.cc
+ sql_union.cc
+ sql_update.cc
+
+*/
+// FIX: a later version will provide better information to the optimizer
+void ha_federated::info(uint flag)
+{
+ DBUG_ENTER("ha_federated::info");
+ records= 10000; // Fake!
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Used to delete all rows in a table. Both for cases of truncate and
+ for cases where the optimizer realizes that all rows will be
+ removed as a result of a SQL statement.
+
+ Called from item_sum.cc by Item_func_group_concat::clear(),
+ Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+ Called from sql_delete.cc by mysql_delete().
+ Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+int ha_federated::delete_all_rows()
+{
+ DBUG_ENTER("ha_federated::delete_all_rows");
+
+ char query_buffer[IO_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append("TRUNCATE ");
+ query.append(share->table_base_name);
+
+  if (mysql_real_query(mysql, query.c_ptr_quick(), query.length()))
+  {
+ my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+ DBUG_RETURN(ER_QUERY_ON_MASTER);
+ }
+
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+}
+
+
+/*
+ The idea with handler::store_lock() is the following:
+
+ The statement decided which locks we should need for the table
+ for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+ read locks.
+
+ Before adding the lock into the table lock handler (see thr_lock.c)
+ mysqld calls store lock with the requested locks. Store lock can now
+ modify a write lock to a read lock (or some other lock), ignore the
+ lock (if we don't want to use MySQL table locks at all) or add locks
+ for many tables (like we do when we are using a MERGE handler).
+
+  Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+  (which signals that we are doing WRITES, but we are still allowing other
+  readers and writers).
+
+  When releasing locks, store_lock() is also called. In this case one
+ usually doesn't have to do anything.
+
+ In some exceptional cases MySQL may send a request for a TL_IGNORE;
+ This means that we are requesting the same lock as last time and this
+ should also be ignored. (This may happen when someone does a flush
+ table when we have opened a part of the tables, in which case mysqld
+  closes and reopens the tables and tries to get the same locks as last
+ time). In the future we will probably try to remove this.
+
+ Called from lock.cc by get_lock_data().
+*/
+THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ {
+ /*
+ Here is where we get into the guts of a row level lock.
+      If TL_UNLOCK is set and we are not doing a LOCK TABLE or
+      DISCARD/IMPORT TABLESPACE, then allow multiple writers
+ */
+
+ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+ lock_type <= TL_WRITE) && !thd->in_lock_tables
+ && !thd->tablespace_op)
+ lock_type= TL_WRITE_ALLOW_WRITE;
+
+ /*
+ In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+ MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+ would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+ to t2. Convert the lock to a normal read lock to allow
+ concurrent inserts to t2.
+ */
+
+ if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
+ lock_type= TL_READ;
+
+ lock.type= lock_type;
+ }
+
+ *to++= &lock;
+
+ return to;
+}
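+
+/*
+  For instance (hypothetical session): an UPDATE arrives with TL_WRITE;
+  since we are not inside LOCK TABLES or a tablespace operation, the code
+  above downgrades it to TL_WRITE_ALLOW_WRITE, and a concurrent
+  INSERT INTO t1 SELECT ... FROM t2 has its TL_READ_NO_INSERT on t2
+  relaxed to TL_READ.
+*/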
+
+/*
+ create() does nothing, since we have no local setup of our own.
+ FUTURE: We should potentially connect to the remote database and
+ create tables if they do not exist.
+*/
+int ha_federated::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ DBUG_ENTER("ha_federated::create");
+ int retcode;
+  FEDERATED_SHARE tmp;
+  retcode= parse_url(&tmp, table_arg, 1);
+ if (retcode < 0)
+ {
+ DBUG_PRINT("ha_federated::create",
+ ("ERROR: on table creation for %s called parse_url, retcode %d",
+ create_info->data_file_name, retcode));
+ DBUG_RETURN(ER_CANT_CREATE_TABLE);
+ }
+ DBUG_RETURN(0);
+}
+#endif /* HAVE_FEDERATED_DB */
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
new file mode 100755
index 00000000000..c11960a836f
--- /dev/null
+++ b/sql/ha_federated.h
@@ -0,0 +1,177 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+  Please read ha_example.cc before reading this file.
+ Please keep in mind that the federated storage engine implements all methods
+ that are required to be implemented. handler.h has a full list of methods
+ that you can implement.
+*/
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+#include <mysql.h>
+//#include <client.h>
+
+/*
+  FEDERATED_SHARE is a structure that will be shared among all open handlers.
+ The example implements the minimum of what you will probably need.
+*/
+//FIX document
+typedef struct st_federated_share {
+ char *table_name;
+ char *table_base_name;
+ // the primary select query to be used in rnd_init
+ char *select_query;
+ // remote host info, parse_url supplies
+ char *scheme;
+ char *hostname;
+ char *username;
+ char *password;
+ char *database;
+ char *table;
+ char *socket;
+ char *sport;
+ int port;
+ uint table_name_length,table_base_name_length,use_count;
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+} FEDERATED_SHARE;
+
+/*
+ Class definition for the storage engine
+*/
+class ha_federated: public handler
+{
+ THR_LOCK_DATA lock; /* MySQL lock */
+ FEDERATED_SHARE *share; /* Shared lock info */
+ MYSQL *mysql;
+ MYSQL_RES *result;
+ uint ref_length;
+ uint fetch_num; // stores the fetch num
+ MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
+
+private:
+ /*
+ return 0 on success
+ return errorcode otherwise
+ */
+ //FIX
+ uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row);
+ uint type_quote(int type);
+ void quote_data(String *string1, Field *field);
+
+public:
+ ha_federated(TABLE *table): handler(table),
+ mysql(0),
+ ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
+ {
+ }
+ ~ha_federated()
+ {
+ }
+ /* The name that will be used for display purposes */
+ const char *table_type() const { return "FEDERATED"; }
+ /*
+ The name of the index type that will be used for display
+ don't implement this method unless you really have indexes
+ */
+ const char *index_type(uint inx) { return "REMOTE"; }
+ const char **bas_ext() const;
+ /*
+ This is a list of flags that says what the storage engine
+ implements. The current table flags are documented in
+ handler.h
+ Serg: Double check these (Brian)
+ // FIX add blob support
+ */
+ ulong table_flags() const
+ {
+    return (HA_TABLE_SCAN_ON_INDEX | HA_NOT_EXACT_COUNT |
+            HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_AUTO_PART_KEY);
+ }
+ /*
+ This is a bitmap of flags that says how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero
+ here.
+
+ part is the key part to check. First key part is 0
+    If all_parts is set, MySQL wants to know the flags for the combined
+ index up to and including 'part'.
+ */
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return (HA_READ_NEXT);
+ // return (HA_READ_NEXT | HA_ONLY_WHOLE_INDEX);
+ }
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_supported_keys() const { return MAX_KEY; }
+ uint max_supported_key_parts() const { return 1024; }
+ uint max_supported_key_length() const { return 1024; }
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time()
+ {
+   DBUG_PRINT("ha_federated::scan_time", ("rows %d", records));
+   return (double) (records * 2);
+ }
+ /*
+ The next method will never be called if you do not implement indexes.
+ */
+ virtual double read_time(ha_rows rows) { return (double) rows / 20.0 + 1; }
+
+ /*
+ The methods below are implemented in ha_federated.cc.
+
+ Most of these methods are not obligatory; skip them and
+ MySQL will treat them as not implemented.
+ */
+ int open(const char *name, int mode, uint test_if_locked); // required
+ int close(void); // required
+
+ int write_row(byte * buf);
+ int update_row(const byte * old_data, byte * new_data);
+ int delete_row(const byte * buf);
+ int index_init(uint keynr);
+ int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_next(byte * buf);
+ int index_end();
+ /*
+ Unlike index_init(), rnd_init() can be called twice
+ without rnd_end() in between (this only makes sense if scan=1).
+ The second call should then prepare for a new table scan:
+ e.g. if rnd_init() allocates the cursor, the second call should
+ position it at the start of the table rather than deallocate
+ and allocate it again.
+ */
+ int rnd_init(bool scan); //required
+ int rnd_end();
+ int rnd_next(byte *buf); //required
+ int rnd_pos(byte * buf, byte *pos); //required
+ void position(const byte *record); //required
+ void info(uint); //required
+
+ int delete_all_rows(void);
+ int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info); //required
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type); //required
+};
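
For reference, the handler above is a thin wrapper over the MySQL C API
(hence the MYSQL * and MYSQL_RES * members). A minimal standalone sketch of
the remote scan that open()/rnd_init()/rnd_next()/rnd_end() wrap; the host,
credentials and table name are hypothetical, and error handling is trimmed:

    /* compile with: g++ sketch.cc $(mysql_config --cflags --libs) */
    #include <mysql.h>
    #include <stdio.h>

    int main()
    {
      MYSQL *conn= mysql_init(NULL);
      if (!mysql_real_connect(conn, "remote-host", "user", "password",
                              "database", 3306, NULL, 0))
        return 1;
      /* share->select_query is built once from the connect string */
      if (mysql_query(conn, "SELECT * FROM remote_table"))
        return 1;
      MYSQL_RES *result= mysql_store_result(conn);     /* rnd_init() */
      if (!result)
        return 1;
      MYSQL_ROW row;
      while ((row= mysql_fetch_row(result)))           /* rnd_next() */
        printf("first column: %s\n", row[0] ? row[0] : "NULL");
      mysql_free_result(result);                       /* rnd_end() */
      mysql_close(conn);
      return 0;
    }
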
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 60555d51402..1556a18bfca 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -488,8 +488,10 @@ int ha_heap::create(const char *name, TABLE *table_arg,
else
{
if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
- seg->type != HA_KEYTYPE_VARTEXT &&
- seg->type != HA_KEYTYPE_VARBINARY)
+ seg->type != HA_KEYTYPE_VARTEXT1 &&
+ seg->type != HA_KEYTYPE_VARTEXT2 &&
+ seg->type != HA_KEYTYPE_VARBINARY1 &&
+ seg->type != HA_KEYTYPE_VARBINARY2)
seg->type= HA_KEYTYPE_BINARY;
}
seg->start= (uint) key_part->offset;
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 6b7f5e05ee2..61481de6e10 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -88,6 +88,7 @@ extern "C" {
uint innobase_init_flags = 0;
ulong innobase_cache_size = 0;
+ulong innobase_large_page_size = 0;
/* The default values for the following, type long, start-up parameters
are declared in mysqld.cc: */
@@ -116,6 +117,9 @@ values */
uint innobase_flush_log_at_trx_commit = 1;
my_bool innobase_log_archive = FALSE;/* unused */
+my_bool innobase_use_doublewrite = TRUE;
+my_bool innobase_use_checksums = TRUE;
+my_bool innobase_use_large_pages = FALSE;
my_bool innobase_use_native_aio = FALSE;
my_bool innobase_fast_shutdown = TRUE;
my_bool innobase_very_fast_shutdown = FALSE; /* this can be set to
@@ -1123,6 +1127,12 @@ innobase_init(void)
srv_fast_shutdown = (ibool) innobase_fast_shutdown;
+ srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
+ srv_use_checksums = (ibool) innobase_use_checksums;
+
+ os_use_large_pages = (ibool) innobase_use_large_pages;
+ os_large_page_size = (ulint) innobase_large_page_size;
+
srv_file_per_table = (ibool) innobase_file_per_table;
srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog;
@@ -1326,7 +1336,7 @@ innobase_commit(
&innodb_dummy_stmt_trx_handle: the latter means
that the current SQL statement ended */
{
- trx_t* trx;
+ trx_t* trx;
DBUG_ENTER("innobase_commit");
DBUG_PRINT("trans", ("ending transaction"));
@@ -3831,6 +3841,7 @@ ha_innobase::create(
char name2[FN_REFLEN];
char norm_name[FN_REFLEN];
THD *thd= current_thd;
+ ib_longlong auto_inc_value;
DBUG_ENTER("ha_innobase::create");
@@ -4001,6 +4012,20 @@ ha_innobase::create(
DBUG_ASSERT(innobase_table != 0);
+ if ((thd->lex->create_info.used_fields & HA_CREATE_USED_AUTO) &&
+ (thd->lex->create_info.auto_increment_value != 0)) {
+
+ /* Query was ALTER TABLE...AUTO_INCREMENT = x; or
+ CREATE TABLE ...AUTO_INCREMENT = x; Find out a table
+ definition from the dictionary and get the current value
+ of the auto increment field. Set a new value to the
+ auto increment field if the value is greater than the
+ maximum value in the column. */
+
+ auto_inc_value = thd->lex->create_info.auto_increment_value;
+ dict_table_autoinc_initialize(innobase_table, auto_inc_value);
+ }
+
/* Tell the InnoDB server that there might be work for
utility threads: */
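
The seeding rule described in the comment above (the requested value wins
only if it exceeds what the column already holds) as a small hypothetical
sketch; the names and values are made up:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
      long long max_in_column= 41;  /* MAX(id) found via the dictionary */
      long long requested= 100;     /* ... AUTO_INCREMENT = 100 */
      /* the counter never moves backwards */
      long long next_id= std::max(max_in_column + 1, requested);
      printf("next auto_increment value: %lld\n", next_id);  /* 100 */
      return 0;
    }
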
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 214e905a327..bb8823fd1bb 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -182,6 +182,7 @@ extern struct show_var_st innodb_status_variables[];
extern uint innobase_init_flags, innobase_lock_type;
extern uint innobase_flush_log_at_trx_commit;
extern ulong innobase_cache_size;
+extern ulong innobase_large_page_size;
extern char *innobase_home, *innobase_tmpdir, *innobase_logdir;
extern long innobase_lock_scan_time;
extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
@@ -196,6 +197,9 @@ extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
extern char *innobase_unix_file_flush_method;
/* The following variables have to be my_bool for SHOW VARIABLES to work */
extern my_bool innobase_log_archive,
+ innobase_use_doublewrite,
+ innobase_use_checksums,
+ innobase_use_large_pages,
innobase_use_native_aio, innobase_fast_shutdown,
innobase_file_per_table, innobase_locks_unsafe_for_binlog,
innobase_create_status_file;
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 726647cd131..c23a728b715 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -926,8 +926,11 @@ int ha_myisam::enable_indexes(uint mode)
{
sql_print_warning("Warning: Enabling keys got errno %d, retrying",
my_errno);
+ thd->clear_error();
param.testflag&= ~(T_REP_BY_SORT | T_QUICK);
error= (repair(thd,param,0) != HA_ADMIN_OK);
+ if (!error && thd->net.report_error)
+ error= HA_ERR_CRASHED;
}
info(HA_STATUS_CONST);
thd->proc_info=save_proc_info;
@@ -1406,7 +1409,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
keydef[i].seg[j].type= (int) type;
keydef[i].seg[j].start= pos->key_part[j].offset;
keydef[i].seg[j].length= pos->key_part[j].length;
- keydef[i].seg[j].bit_start=keydef[i].seg[j].bit_end=0;
+ keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end=
+ keydef[i].seg[j].bit_pos= 0;
keydef[i].seg[j].language = field->charset()->number;
if (field->null_ptr)
@@ -1428,6 +1432,13 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
table_arg->blob_ptr_size);
}
+ else if (field->type() == FIELD_TYPE_BIT)
+ {
+ keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len;
+ keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs;
+ keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr -
+ (uchar*) table_arg->record[0]);
+ }
}
keyseg+=pos->key_parts;
}
@@ -1471,11 +1482,10 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
break;
if (found->flags & BLOB_FLAG)
- {
recinfo_pos->type= (int) FIELD_BLOB;
- }
- else if (!(options & HA_OPTION_PACK_RECORD) ||
- found->type() == MYSQL_TYPE_VARCHAR)
+ else if (found->type() == MYSQL_TYPE_VARCHAR)
+ recinfo_pos->type= FIELD_VARCHAR;
+ else if (!(options & HA_OPTION_PACK_RECORD))
recinfo_pos->type= (int) FIELD_NORMAL;
else if (found->zero_pack())
recinfo_pos->type= (int) FIELD_SKIP_ZERO;
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 527e6a49aba..d2fe36c8357 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -47,7 +47,7 @@ class ha_myisam: public handler
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED),
+ HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD),
can_enable_indexes(1)
{}
~ha_myisam() {}
diff --git a/sql/handler.cc b/sql/handler.cc
index edb4d5b488b..e43f2c2e888 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -47,6 +47,9 @@
#ifdef HAVE_NDBCLUSTER_DB
#include "ha_ndbcluster.h"
#endif
+#ifdef HAVE_FEDERATED_DB
+#include "ha_federated.h"
+#endif
#include <myisampack.h>
#include <errno.h>
@@ -92,6 +95,8 @@ struct show_table_type_st sys_table_types[]=
"Archive storage engine", DB_TYPE_ARCHIVE_DB},
{"CSV",&have_csv_db,
"CSV storage engine", DB_TYPE_CSV_DB},
+ {"FEDERATED",&have_federated_db,
+ "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB},
{NullS, NULL, NullS, DB_TYPE_UNKNOWN}
};
@@ -200,6 +205,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
#endif
+#ifdef HAVE_FEDERATED_DB
+ case DB_TYPE_FEDERATED_DB:
+ return new ha_federated(table);
+#endif
#ifdef HAVE_CSV_DB
case DB_TYPE_CSV_DB:
return new ha_tina(table);
diff --git a/sql/handler.h b/sql/handler.h
index bd2d5e42b65..f0faeff9234 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -75,6 +75,7 @@
/* Table data are stored in separate files (for lower_case_table_names) */
#define HA_FILE_BASED (1 << 26)
#define HA_NO_VARCHAR (1 << 27)
+#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
/* bits in index_flags(index_number) for what you can do with index */
@@ -152,6 +153,7 @@ enum db_type
DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
+ DB_TYPE_FEDERATED_DB,
DB_TYPE_DEFAULT // Must be last
};
diff --git a/sql/item.cc b/sql/item.cc
index f00a35fe628..9117105f26e 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2573,11 +2573,11 @@ void Item_real::print(String *str)
}
-/****************************************************************************
-** varbinary item
-** In string context this is a binary string
-** In number context this is a longlong value.
-****************************************************************************/
+/*
+ hex item
+ In string context this is a binary string.
+ In number context this is a longlong value.
+*/
inline uint char_val(char X)
{
@@ -2587,7 +2587,7 @@ inline uint char_val(char X)
}
-Item_varbinary::Item_varbinary(const char *str, uint str_length)
+Item_hex_string::Item_hex_string(const char *str, uint str_length)
{
name=(char*) str-2; // Lex makes this start with 0x
max_length=(str_length+1)/2;
@@ -2608,7 +2608,7 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length)
fixed= 1;
}
-longlong Item_varbinary::val_int()
+longlong Item_hex_string::val_int()
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
@@ -2622,7 +2622,7 @@ longlong Item_varbinary::val_int()
}
-int Item_varbinary::save_in_field(Field *field, bool no_conversions)
+int Item_hex_string::save_in_field(Field *field, bool no_conversions)
{
int error;
field->set_notnull();
@@ -2640,6 +2640,44 @@ int Item_varbinary::save_in_field(Field *field, bool no_conversions)
/*
+ bin item.
+ In string context this is a binary string.
+ In number context this is a longlong value.
+*/
+
+Item_bin_string::Item_bin_string(const char *str, uint str_length)
+{
+ const char *end= str + str_length - 1;
+ uchar bits= 0;
+ uint power= 1;
+
+ name= (char*) str - 2;
+ max_length= (str_length + 7) >> 3;
+ char *ptr= (char*) sql_alloc(max_length + 1);
+ if (!ptr)
+ return;
+ str_value.set(ptr, max_length, &my_charset_bin);
+ ptr+= max_length - 1;
+ ptr[1]= 0; // Set end null for string
+ for (; end >= str; end--)
+ {
+ if (power == 256)
+ {
+ power= 1;
+ *ptr--= bits;
+ bits= 0;
+ }
+ if (*end == '1')
+ bits|= power;
+ power<<= 1;
+ }
+ *ptr= (char) bits;
+ collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
+ fixed= 1;
+}
+
+
+/*
Pack data in buffer for sending
*/
@@ -2672,6 +2710,7 @@ bool Item::send(Protocol *protocol, String *buffer)
case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_BIT:
{
String *res;
if ((res=val_str(buffer)))
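
The Item_bin_string constructor above walks the b'...' literal right to
left, packing eight bits per output byte. The same algorithm as a
standalone sketch (std::string and std::vector standing in for String and
sql_alloc; the helper names are made up):

    #include <cstdio>
    #include <string>
    #include <vector>

    /* Pack a string of '0'/'1' characters into big-endian bytes,
       taking the least significant bit from the end of the string. */
    static std::vector<unsigned char> pack_bits(const std::string &s)
    {
      std::vector<unsigned char> out((s.size() + 7) / 8, 0);
      size_t byte= out.size() - 1;
      unsigned power= 1;
      for (size_t i= s.size(); i-- > 0; )
      {
        if (power == 256)
        {
          power= 1;
          byte--;
        }
        if (s[i] == '1')
          out[byte]|= power;
        power<<= 1;
      }
      return out;
    }

    int main()
    {
      for (unsigned char b : pack_bits("101100101"))   /* b'101100101' */
        printf("%02x ", b);                            /* prints: 01 65 */
      return 0;
    }
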
diff --git a/sql/item.h b/sql/item.h
index cf3dc8896a5..d5361bdcc8a 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -959,13 +959,14 @@ public:
};
-class Item_varbinary :public Item
+class Item_hex_string: public Item
{
public:
- Item_varbinary(const char *str,uint str_length);
+ Item_hex_string(): Item() {}
+ Item_hex_string(const char *str,uint str_length);
enum Type type() const { return VARBIN_ITEM; }
double val_real()
- { DBUG_ASSERT(fixed == 1); return (double) Item_varbinary::val_int(); }
+ { DBUG_ASSERT(fixed == 1); return (double) Item_hex_string::val_int(); }
longlong val_int();
bool basic_const_item() const { return 1; }
String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; }
@@ -977,6 +978,12 @@ public:
};
+class Item_bin_string: public Item_hex_string
+{
+public:
+ Item_bin_string(const char *str,uint str_length);
+};
+
class Item_result_field :public Item /* Item with result field */
{
public:
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index b242698d36e..949545bcdb0 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -1214,7 +1214,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2)
{
Field* f = *field;
int len = *lengths++;
- int res = f->key_cmp(key1, key2);
+ int res = f->cmp(key1, key2);
if (res)
return res;
key1 += len;
@@ -1668,7 +1668,7 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
{
int res;
uint offset= (uint) (field->ptr - record);
- if ((res= field->key_cmp(key1 + offset, key2 + offset)))
+ if ((res= field->cmp(key1 + offset, key2 + offset)))
return res;
}
}
@@ -1702,7 +1702,7 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
{
int res;
uint offset= (uint) (field->ptr - record);
- if ((res= field->key_cmp(key1 + offset, key2 + offset)))
+ if ((res= field->cmp(key1 + offset, key2 + offset)))
return (*order_item)->asc ? res : -res;
}
}
diff --git a/sql/key.cc b/sql/key.cc
index dfd924f1dc7..d54b8721cab 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -102,6 +102,19 @@ void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length)
key_part->null_bit);
key_length--;
}
+ if (key_part->type == HA_KEYTYPE_BIT)
+ {
+ Field_bit *field= (Field_bit *) (key_part->field);
+ if (field->bit_len)
+ {
+ uchar bits= get_rec_bits((uchar*) from_record +
+ key_part->null_offset +
+ (key_part->null_bit == 128),
+ field->bit_ofs, field->bit_len);
+ *to_key++= bits;
+ key_length--;
+ }
+ }
if (key_part->key_part_flag & HA_BLOB_PART)
{
char *pos;
@@ -170,6 +183,23 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
to_record[key_part->null_offset]&= ~key_part->null_bit;
key_length--;
}
+ if (key_part->type == HA_KEYTYPE_BIT)
+ {
+ Field_bit *field= (Field_bit *) (key_part->field);
+ if (field->bit_len)
+ {
+ uchar bits= *(from_key + key_part->length - field->field_length -1);
+ set_rec_bits(bits, to_record + key_part->null_offset +
+ (key_part->null_bit == 128),
+ field->bit_ofs, field->bit_len);
+ }
+ else
+ {
+ clr_rec_bits(to_record + key_part->null_offset +
+ (key_part->null_bit == 128),
+ field->bit_ofs, field->bit_len);
+ }
+ }
if (key_part->key_part_flag & HA_BLOB_PART)
{
uint blob_length= uint2korr(from_key);
@@ -220,54 +250,54 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length)
{
- uint length;
+ uint store_length;
KEY_PART_INFO *key_part;
+ const byte *key_end= key + key_length;
for (key_part=table->key_info[idx].key_part;
- (int) key_length > 0;
- key_part++, key+=length, key_length-=length)
+ key < key_end ;
+ key_part++, key+= store_length)
{
+ uint length;
+ store_length= key_part->store_length;
+
if (key_part->null_bit)
{
- key_length--;
if (*key != test(table->record[0][key_part->null_offset] &
key_part->null_bit))
return 1;
if (*key)
- {
- length=key_part->store_length;
continue;
- }
key++;
+ store_length--;
}
- if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART))
+ if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
+ HA_BIT_PART))
{
if (key_part->field->key_cmp(key, key_part->length))
return 1;
- length=key_part->length+HA_KEY_BLOB_LENGTH;
+ continue;
}
- else
+ length= min((uint) (key_end-key), store_length);
+ if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
+ FIELDFLAG_PACK)))
{
- length=min(key_length,key_part->length);
- if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
- FIELDFLAG_PACK)))
+ CHARSET_INFO *cs= key_part->field->charset();
+ uint char_length= key_part->length / cs->mbmaxlen;
+ const byte *pos= table->record[0] + key_part->offset;
+ if (length > char_length)
{
- CHARSET_INFO *cs= key_part->field->charset();
- uint char_length= key_part->length / cs->mbmaxlen;
- const byte *pos= table->record[0] + key_part->offset;
- if (length > char_length)
- {
- char_length= my_charpos(cs, pos, pos + length, char_length);
- set_if_smaller(char_length, length);
- }
- if (cs->coll->strnncollsp(cs,
- (const uchar*) key, length,
- (const uchar*) pos, char_length, 0))
- return 1;
+ char_length= my_charpos(cs, pos, pos + length, char_length);
+ set_if_smaller(char_length, length);
}
- else if (memcmp(key,table->record[0]+key_part->offset,length))
- return 1;
+ if (cs->coll->strnncollsp(cs,
+ (const uchar*) key, length,
+ (const uchar*) pos, char_length, 0))
+ return 1;
+ continue;
}
+ if (memcmp(key,table->record[0]+key_part->offset,length))
+ return 1;
}
return 0;
}
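
The get_rec_bits()/set_rec_bits() helpers used above access a bit field
that may straddle a byte boundary. A rough standalone sketch of the read
side (simplified; assumes the two bytes at ptr are readable and len <= 8):

    #include <cstdio>

    typedef unsigned char uchar;

    /* Read len bits starting ofs bits into *ptr: fetch 16 bits
       little-endian, then shift and mask. */
    static uchar get_bits(const uchar *ptr, unsigned ofs, unsigned len)
    {
      unsigned v= ptr[0] | ((unsigned) ptr[1] << 8);
      return (uchar) ((v >> ofs) & ((1U << len) - 1));
    }

    int main()
    {
      /* bits 6..10 straddle the boundary between the two bytes */
      uchar record[2]= { 0xC0, 0x07 };
      printf("%u\n", get_bits(record, 6, 5));   /* prints 31 */
      return 0;
    }
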
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 963ba484cea..2fc82e05f31 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1028,6 +1028,8 @@ extern uint opt_crash_binlog_innodb;
extern char *shared_memory_base_name, *mysqld_unix_port;
extern bool opt_enable_shared_memory;
extern char *default_tz_name;
+extern my_bool opt_large_pages;
+extern uint opt_large_page_size;
extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log;
extern FILE *bootstrap_file;
@@ -1071,6 +1073,7 @@ extern struct my_option my_long_options[];
extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db;
extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db;
+extern SHOW_COMP_OPTION have_federated_db;
extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink;
extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
@@ -1140,8 +1143,6 @@ my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist);
bool str_to_time_with_warn(const char *str,uint length,TIME *l_time);
timestamp_type str_to_datetime_with_warn(const char *str, uint length,
TIME *l_time, uint flags);
-longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date,
- int *was_cut);
void localtime_to_TIME(TIME *to, struct tm *from);
void calc_time_from_sec(TIME *to, long seconds, long microseconds);
@@ -1163,10 +1164,6 @@ void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time,
String *str);
void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time,
String *str);
-ulonglong TIME_to_ulonglong_datetime(const TIME *time);
-ulonglong TIME_to_ulonglong_date(const TIME *time);
-ulonglong TIME_to_ulonglong_time(const TIME *time);
-ulonglong TIME_to_ulonglong(const TIME *time);
int test_if_number(char *str,int *res,bool allow_wildcards);
void change_byte(byte *,uint,char,char);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 42ae6982eb0..c5698469341 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -78,10 +78,6 @@
#define IF_PURIFY(A,B) (B)
#endif
-#ifndef INADDR_NONE
-#define INADDR_NONE -1 // Error value from inet_addr
-#endif
-
/* stack traces are only supported on linux intel */
#if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK)
#define HAVE_STACK_TRACE_ON_SEGV
@@ -111,6 +107,7 @@ extern "C" { // Because of SCO 3.2V4.2
#ifdef HAVE_GRP_H
#include <grp.h>
#endif
+#include <my_net.h>
#if defined(OS2)
# include <sys/un.h>
@@ -299,6 +296,8 @@ my_bool opt_short_log_format= 0;
my_bool opt_log_queries_not_using_indexes= 0;
my_bool lower_case_file_system= 0;
my_bool opt_innodb_safe_binlog= 0;
+my_bool opt_large_pages= 0;
+uint opt_large_page_size= 0;
volatile bool mqh_used = 0;
uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
@@ -392,6 +391,7 @@ CHARSET_INFO *national_charset_info, *table_alias_charset;
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
have_example_db, have_archive_db, have_csv_db;
+SHOW_COMP_OPTION have_federated_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
@@ -2423,6 +2423,19 @@ static int init_common_variables(const char *conf_file_name, int argc,
DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname,
server_version, SYSTEM_TYPE,MACHINE_TYPE));
+#ifdef HAVE_LARGE_PAGES
+ /* Initialize large page size */
+ if (opt_large_pages && (opt_large_page_size= my_get_large_page_size()))
+ {
+ my_use_large_pages= 1;
+ my_large_page_size= opt_large_page_size;
+#ifdef HAVE_INNOBASE_DB
+ innobase_use_large_pages= 1;
+ innobase_large_page_size= opt_large_page_size;
+#endif
+ }
+#endif /* HAVE_LARGE_PAGES */
+
/* connections and databases needs lots of files */
{
uint files, wanted_files;
@@ -4086,6 +4099,8 @@ enum options_mysqld
OPT_INNODB_LOG_ARCHIVE,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
OPT_INNODB_FLUSH_METHOD,
+ OPT_INNODB_DOUBLEWRITE,
+ OPT_INNODB_CHECKSUMS,
OPT_INNODB_FAST_SHUTDOWN,
OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB,
OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
@@ -4184,7 +4199,8 @@ enum options_mysqld
OPT_OPTIMIZER_SEARCH_DEPTH,
OPT_OPTIMIZER_PRUNE_LEVEL,
OPT_UPDATABLE_VIEWS_WITH_LIMIT,
- OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET
+ OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET,
+ OPT_ENABLE_LARGE_PAGES
};
@@ -4343,6 +4359,12 @@ Disable with --skip-bdb (will save memory).",
"Set up signals usable for debugging",
(gptr*) &opt_debugging, (gptr*) &opt_debugging,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_LARGE_PAGES
+ {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \
+Disable with --skip-large-pages.",
+ (gptr*) &opt_large_pages, (gptr*) &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0,
+ 0, 0, 0},
+#endif
{"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection",
(gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -4366,6 +4388,12 @@ Disable with --skip-innodb (will save memory).",
"The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir,
(gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
+ {"innodb_doublewrite", OPT_INNODB_DOUBLEWRITE, "Enable InnoDB doublewrite buffer (enabled by default). \
+Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
+ (gptr*) &innobase_use_doublewrite, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \
+Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums,
+ (gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN,
"Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown,
(gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
@@ -5687,7 +5715,8 @@ static void mysql_init_variables(void)
mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS;
bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list));
bzero((char *) &global_status_var, sizeof(global_status_var));
-
+ opt_large_pages= 0;
+
/* Character sets */
system_charset_info= &my_charset_utf8_general_ci;
files_charset_info= &my_charset_utf8_general_ci;
@@ -5793,6 +5822,11 @@ static void mysql_init_variables(void)
#else
have_archive_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_FEDERATED_DB
+ have_federated_db= SHOW_OPTION_YES;
+#else
+ have_federated_db= SHOW_OPTION_NO;
+#endif
#ifdef HAVE_CSV_DB
have_csv_db= SHOW_OPTION_YES;
#else
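
On Linux, the page size probed here via my_get_large_page_size() ultimately
comes from the "Hugepagesize" line of /proc/meminfo. A minimal sketch of
that probe (hypothetical helper name, Linux only):

    #include <cstdio>

    /* Return the kernel huge page size in bytes, or 0 if unknown. */
    static unsigned long probe_large_page_size(void)
    {
      FILE *f= fopen("/proc/meminfo", "r");
      if (!f)
        return 0;
      char line[128];
      unsigned long kb= 0;
      while (fgets(line, sizeof(line), f))
        if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
          break;
      fclose(f);
      return kb * 1024;
    }

    int main()
    {
      printf("large page size: %lu bytes\n", probe_large_page_size());
      return 0;
    }
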
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index f9149f10a30..e47c7e147a7 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -746,7 +746,7 @@ int QUICK_RANGE_SELECT::init()
void QUICK_RANGE_SELECT::range_end()
{
if (file->inited != handler::NONE)
- file->ha_index_end();
+ file->ha_index_or_rnd_end();
}
@@ -777,8 +777,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param,
TABLE *table)
- :cur_quick_it(quick_selects),pk_quick_select(NULL),unique(NULL),
- thd(thd_param)
+ :pk_quick_select(NULL), thd(thd_param)
{
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT");
index= MAX_KEY;
@@ -790,17 +789,14 @@ QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param,
int QUICK_INDEX_MERGE_SELECT::init()
{
- cur_quick_it.rewind();
- cur_quick_select= cur_quick_it++;
- return 0;
+ DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::init");
+ DBUG_RETURN(0);
}
int QUICK_INDEX_MERGE_SELECT::reset()
{
- int result;
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset");
- result= cur_quick_select->reset() || prepare_unique();
- DBUG_RETURN(result);
+ DBUG_RETURN(read_keys_and_merge());
}
bool
@@ -820,8 +816,12 @@ QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range)
QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT()
{
+ List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects);
+ QUICK_RANGE_SELECT* quick;
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT");
- delete unique;
+ quick_it.rewind();
+ while ((quick= quick_it++))
+ quick->file= NULL;
quick_selects.delete_elements();
delete pk_quick_select;
free_root(&alloc,MYF(0));
@@ -833,7 +833,8 @@ QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param,
TABLE *table,
bool retrieve_full_rows,
MEM_ROOT *parent_alloc)
- : cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows)
+ : cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows),
+ scans_inited(false)
{
index= MAX_KEY;
head= table;
@@ -859,8 +860,9 @@ QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param,
int QUICK_ROR_INTERSECT_SELECT::init()
{
- /* Check if last_rowid was successfully allocated in ctor */
- return !last_rowid;
+ DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init");
+ /* Check if last_rowid was successfully allocated in ctor */
+ DBUG_RETURN(!last_rowid);
}
@@ -953,7 +955,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan");
/* Initialize all merged "children" quick selects */
- DBUG_ASSERT(!(need_to_fetch_row && !reuse_handler));
+ DBUG_ASSERT(!need_to_fetch_row || reuse_handler);
if (!need_to_fetch_row && reuse_handler)
{
quick= quick_it++;
@@ -995,7 +997,14 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
int QUICK_ROR_INTERSECT_SELECT::reset()
{
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset");
- DBUG_RETURN(init_ror_merged_scan(TRUE));
+ if (!scans_inited && init_ror_merged_scan(TRUE))
+ DBUG_RETURN(1);
+ scans_inited= true;
+ List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
+ QUICK_RANGE_SELECT *quick;
+ while ((quick= it++))
+ quick->reset();
+ DBUG_RETURN(0);
}
@@ -1034,7 +1043,7 @@ QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT()
QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param,
TABLE *table)
- :thd(thd_param)
+ : thd(thd_param), scans_inited(false)
{
index= MAX_KEY;
head= table;
@@ -1057,18 +1066,19 @@ QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param,
int QUICK_ROR_UNION_SELECT::init()
{
+ DBUG_ENTER("QUICK_ROR_UNION_SELECT::init");
if (init_queue(&queue, quick_selects.elements, 0,
FALSE , QUICK_ROR_UNION_SELECT::queue_cmp,
(void*) this))
{
bzero(&queue, sizeof(QUEUE));
- return 1;
+ DBUG_RETURN(1);
}
if (!(cur_rowid= (byte*)alloc_root(&alloc, 2*head->file->ref_length)))
- return 1;
+ DBUG_RETURN(1);
prev_rowid= cur_rowid + head->file->ref_length;
- return 0;
+ DBUG_RETURN(0);
}
@@ -1106,6 +1116,18 @@ int QUICK_ROR_UNION_SELECT::reset()
int error;
DBUG_ENTER("QUICK_ROR_UNION_SELECT::reset");
have_prev_rowid= FALSE;
+ if (!scans_inited)
+ {
+ QUICK_SELECT_I *quick;
+ List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
+ while ((quick= it++))
+ {
+ if (quick->init_ror_merged_scan(FALSE))
+ DBUG_RETURN(1);
+ }
+ scans_inited= true;
+ }
+ queue_remove_all(&queue);
/*
Initialize scans for merged quick selects and put all merged quick
selects into the queue.
@@ -1113,7 +1135,7 @@ int QUICK_ROR_UNION_SELECT::reset()
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
while ((quick= it++))
{
- if (quick->init_ror_merged_scan(FALSE))
+ if (quick->reset())
DBUG_RETURN(1);
if ((error= quick->get_next()))
{
@@ -1591,7 +1613,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
-
delete quick;
quick=0;
needed_reg.clear_all();
@@ -3687,7 +3708,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
}
/* Get local copy of key */
copies= 1;
- if (field->key_type() == HA_KEYTYPE_VARTEXT)
+ if (field->key_type() == HA_KEYTYPE_VARTEXT1 ||
+ field->key_type() == HA_KEYTYPE_VARTEXT2)
copies= 2;
str= str2= (char*) alloc_root(param->mem_root,
(key_part->store_length)*copies+1);
@@ -4999,7 +5021,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
char *min_key,uint min_key_flag, char *max_key,
uint max_key_flag)
{
- ha_rows records=0,tmp;
+ ha_rows records=0, tmp;
+ uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length;
+ char *tmp_min_key, *tmp_max_key;
param->max_key_part=max(param->max_key_part,key_tree->part);
if (key_tree->left != &null_element)
@@ -5017,13 +5041,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
return records;
}
- uint tmp_min_flag,tmp_max_flag,keynr;
- char *tmp_min_key=min_key,*tmp_max_key=max_key;
-
+ tmp_min_key= min_key;
+ tmp_max_key= max_key;
key_tree->store(param->key[idx][key_tree->part].store_length,
&tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag);
- uint min_key_length= (uint) (tmp_min_key- param->min_key);
- uint max_key_length= (uint) (tmp_max_key- param->max_key);
+ min_key_length= (uint) (tmp_min_key- param->min_key);
+ max_key_length= (uint) (tmp_max_key- param->max_key);
if (param->is_ror_scan)
{
@@ -5551,22 +5574,29 @@ err:
/*
- Fetch all row ids into unique.
-
+ Perform key scans for all used indexes (except CPK), get rowids and merge
+ them into an ordered, duplicate-free sequence of rowids.
+
+ The merge/duplicate removal is performed using the Unique class. We put
+ all rowids into Unique, get the sorted sequence and destroy the Unique.
+
If table has a clustered primary key that covers all rows (TRUE for bdb
and innodb currently) and one of the index_merge scans is a scan on PK,
then
- primary key scan rowids are not put into Unique and also
- rows that will be retrieved by PK scan are not put into Unique
+ rows that will be retrieved by PK scan are not put into Unique and
+ primary key scan is not performed here; it is performed separately later.
RETURN
0 OK
other error
*/
-int QUICK_INDEX_MERGE_SELECT::prepare_unique()
+int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
{
+ List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects);
+ QUICK_RANGE_SELECT* cur_quick;
int result;
+ Unique *unique;
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
/* We're going to just read rowids. */
@@ -5581,7 +5611,17 @@ int QUICK_INDEX_MERGE_SELECT::prepare_unique()
*/
head->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- cur_quick_select->init();
+ cur_quick_it.rewind();
+ cur_quick= cur_quick_it++;
+ DBUG_ASSERT(cur_quick);
+
+ /*
+ We reuse the same instance of handler so we need to call both init and
+ reset here.
+ */
+ if (cur_quick->init())
+ DBUG_RETURN(1);
+ cur_quick->reset();
unique= new Unique(refpos_order_cmp, (void *)head->file,
head->file->ref_length,
@@ -5590,24 +5630,28 @@ int QUICK_INDEX_MERGE_SELECT::prepare_unique()
DBUG_RETURN(1);
for (;;)
{
- while ((result= cur_quick_select->get_next()) == HA_ERR_END_OF_FILE)
+ while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE)
{
- cur_quick_select->range_end();
- cur_quick_select= cur_quick_it++;
- if (!cur_quick_select)
+ cur_quick->range_end();
+ cur_quick= cur_quick_it++;
+ if (!cur_quick)
break;
- if (cur_quick_select->init())
+ if (cur_quick->file->inited != handler::NONE)
+ cur_quick->file->ha_index_end();
+ if (cur_quick->init())
DBUG_RETURN(1);
-
/* QUICK_RANGE_SELECT::reset never fails */
- cur_quick_select->reset();
+ cur_quick->reset();
}
if (result)
{
if (result != HA_ERR_END_OF_FILE)
+ {
+ cur_quick->range_end();
DBUG_RETURN(result);
+ }
break;
}
@@ -5618,8 +5662,8 @@ int QUICK_INDEX_MERGE_SELECT::prepare_unique()
if (pk_quick_select && pk_quick_select->row_in_ranges())
continue;
- cur_quick_select->file->position(cur_quick_select->record);
- result= unique->unique_add((char*)cur_quick_select->file->ref);
+ cur_quick->file->position(cur_quick->record);
+ result= unique->unique_add((char*)cur_quick->file->ref);
if (result)
DBUG_RETURN(1);
@@ -5627,6 +5671,7 @@ int QUICK_INDEX_MERGE_SELECT::prepare_unique()
/* ok, all row ids are in Unique */
result= unique->get(head);
+ delete unique;
doing_pk_scan= FALSE;
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
@@ -5666,6 +5711,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
doing_pk_scan= TRUE;
if ((result= pk_quick_select->init()))
DBUG_RETURN(result);
+ pk_quick_select->reset();
DBUG_RETURN(pk_quick_select->get_next());
}
}
@@ -5888,7 +5934,7 @@ int QUICK_RANGE_SELECT::get_next()
SYNOPSIS
QUICK_RANGE_SELECT::get_next_prefix()
prefix_length length of cur_prefix
- cur_prefix prefix of a key to be searached for
+ cur_prefix prefix of a key to be searched for
DESCRIPTION
Each subsequent call to the method retrieves the first record that has a
@@ -7402,7 +7448,8 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
quick->quick_prefix_select= NULL; /* Can't construct a quick select. */
else
/* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */
- quick->quick_prefix_select= get_quick_select(param, param_idx, index_tree,
+ quick->quick_prefix_select= get_quick_select(param, param_idx,
+ index_tree,
&quick->alloc);
/*
@@ -8446,7 +8493,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
store_length--;
}
field->set_key_image((char*) key, key_part->length);
- field->val_str(&tmp);
+ if (field->type() == MYSQL_TYPE_BIT)
+ (void) field->val_int_as_str(&tmp, 1);
+ else
+ field->val_str(&tmp);
fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE);
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
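
The Unique class that read_keys_and_merge() fills is essentially a
disk-backed sorted set of rowids. A toy in-memory analogue of the merge
step (std::set standing in for Unique; the rowids are fabricated):

    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    int main()
    {
      /* Rowids returned by two range scans over different indexes;
         "r2" satisfies both predicates, so it arrives twice. */
      std::vector<std::string> scan1= { "r1", "r2", "r5" };
      std::vector<std::string> scan2= { "r2", "r7" };

      std::set<std::string> unique;        /* stands in for class Unique */
      for (const std::string &rid : scan1) unique.insert(rid);
      for (const std::string &rid : scan2) unique.insert(rid);

      /* unique->get(head): walk the ordered, duplicate-free sequence
         and fetch each full row exactly once. */
      for (const std::string &rid : unique)
        printf("fetch row at %s\n", rid.c_str());
      return 0;
    }
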
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 19234f61ea2..74d388128c8 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -127,7 +127,8 @@ public:
reset() should be called when it is certain that row retrieval will be
necessary. This call may do heavyweight initialization like buffering first
N records etc. If reset() call fails get_next() must not be called.
-
+ Note that reset() may be called several times if this quick select
+ executes in a subselect.
RETURN
0 OK
other Error code
@@ -274,6 +275,10 @@ public:
next=0;
range= NULL;
cur_range= NULL;
+ /*
+ Note: in opt_range.cc there are places where it is assumed that this
+ function always succeeds
+ */
return 0;
}
int init();
@@ -388,21 +393,15 @@ public:
/* range quick selects this index_merge read consists of */
List<QUICK_RANGE_SELECT> quick_selects;
- /* quick select which is currently used for rows retrieval */
- List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it;
- QUICK_RANGE_SELECT* cur_quick_select;
-
/* quick select that uses clustered primary key (NULL if none) */
QUICK_RANGE_SELECT* pk_quick_select;
/* true if this select is currently doing a clustered PK scan */
bool doing_pk_scan;
- Unique *unique;
MEM_ROOT alloc;
-
THD *thd;
- int prepare_unique();
+ int read_keys_and_merge();
/* used to get rows collected in Unique */
READ_RECORD read_record;
@@ -465,6 +464,8 @@ public:
MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */
THD *thd; /* current thread */
bool need_to_fetch_row; /* if true, do retrieve full table records. */
+ /* in top-level quick select, true if merged scans were initialized */
+ bool scans_inited;
};
@@ -514,6 +515,7 @@ public:
uint rowid_length; /* table rowid length */
private:
static int queue_cmp(void *arg, byte *val1, byte *val2);
+ bool scans_inited;
};
diff --git a/sql/protocol.cc b/sql/protocol.cc
index d2e63539610..4c916d78378 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -726,6 +726,7 @@ bool Protocol_simple::store(const char *from, uint length,
#ifndef DEBUG_OFF
DBUG_ASSERT(field_types == 0 ||
field_types[field_pos] == MYSQL_TYPE_DECIMAL ||
+ field_types[field_pos] == MYSQL_TYPE_BIT ||
(field_types[field_pos] >= MYSQL_TYPE_ENUM &&
field_types[field_pos] <= MYSQL_TYPE_GEOMETRY));
field_pos++;
@@ -741,6 +742,7 @@ bool Protocol_simple::store(const char *from, uint length,
#ifndef DEBUG_OFF
DBUG_ASSERT(field_types == 0 ||
field_types[field_pos] == MYSQL_TYPE_DECIMAL ||
+ field_types[field_pos] == MYSQL_TYPE_BIT ||
(field_types[field_pos] >= MYSQL_TYPE_ENUM &&
field_types[field_pos] <= MYSQL_TYPE_GEOMETRY));
field_pos++;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 234ec6617c3..da6341597f1 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -715,7 +715,8 @@ struct show_var_st init_vars[]= {
{"have_compress", (char*) &have_compress, SHOW_HAVE},
{"have_crypt", (char*) &have_crypt, SHOW_HAVE},
{"have_csv", (char*) &have_csv_db, SHOW_HAVE},
- {"have_example_engine", (char*) &have_example_db, SHOW_HAVE},
+ {"have_example_engine", (char*) &have_example_db, SHOW_HAVE},
+ {"have_federated_db", (char*) &have_federated_db, SHOW_HAVE},
{"have_geometry", (char*) &have_geometry, SHOW_HAVE},
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
@@ -735,6 +736,8 @@ struct show_var_st init_vars[]= {
{"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG },
{"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR},
{"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR},
+ {"innodb_doublewrite", (char*) &innobase_use_doublewrite, SHOW_MY_BOOL},
+ {"innodb_checksums", (char*) &innobase_use_checksums, SHOW_MY_BOOL},
{"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL},
{"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG },
{"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL},
@@ -768,6 +771,8 @@ struct show_var_st init_vars[]= {
SHOW_SYS},
{"language", language, SHOW_CHAR},
{"large_files_support", (char*) &opt_large_files, SHOW_BOOL},
+ {"large_pages", (char*) &opt_large_pages, SHOW_MY_BOOL},
+ {"large_page_size", (char*) &opt_large_page_size, SHOW_INT},
{sys_license.name, (char*) &sys_license, SHOW_SYS},
{sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS},
#ifdef HAVE_MLOCKALL
diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am
index b50ba2be8da..cfbbb36c489 100644
--- a/sql/share/Makefile.am
+++ b/sql/share/Makefile.am
@@ -1,5 +1,7 @@
## Process this file with automake to create Makefile.in
+EXTRA_DIST= errmsg.txt
+
dist-hook:
for dir in charsets @AVAILABLE_LANGUAGES@; do \
test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 714b18b39f2..c93e203f0d5 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -4011,22 +4011,14 @@ ER_DUMP_NOT_IMPLEMENTED
dan "Denne tabeltype unserstøtter ikke binært tabeldump"
nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump"
eng "The storage engine for the table does not support binary table dump"
- est "The handler for the table does not support binary table dump"
fre "Ce type de table ne supporte pas les copies binaires"
ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump"
- greek "The handler for the table does not support binary table dump"
- hun "The handler for the table does not support binary table dump"
ita "Il gestore per la tabella non supporta il dump binario"
jpn "The handler for the table does not support binary table dump"
- kor "The handler for the table does not support binary table dump"
- nor "The handler for the table does not support binary table dump"
- norwegian-ny "The handler for the table does not support binary table dump"
- pol "The handler for the table does not support binary table dump"
por "O manipulador de tabela não suporta 'dump' binário de tabela"
rum "The handler for the table does not support binary table dump"
rus "ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)"
serbian "Handler tabele ne podržava binarni dump tabele"
- slo "The handler for the table does not support binary table dump"
spa "El manipulador de tabla no soporta dump para tabla binaria"
swe "Tabellhanteraren klarar inte en binär kopiering av tabellen"
ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ"
@@ -4059,24 +4051,15 @@ ER_INDEX_REBUILD
dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'"
nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'"
eng "Failed rebuilding the index of dumped table '%-.64s'"
- est "Failed rebuilding the index of dumped table '%-.64s'"
fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué"
ger "Neuerstellung des Indizes der Dump-Tabelle '%-.64s' fehlgeschlagen"
greek "Failed rebuilding the index of dumped table '%-.64s'"
hun "Failed rebuilding the index of dumped table '%-.64s'"
ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'"
- jpn "Failed rebuilding the index of dumped table '%-.64s'"
- kor "Failed rebuilding the index of dumped table '%-.64s'"
- nor "Failed rebuilding the index of dumped table '%-.64s'"
- norwegian-ny "Failed rebuilding the index of dumped table '%-.64s'"
- pol "Failed rebuilding the index of dumped table '%-.64s'"
por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'"
- rum "Failed rebuilding the index of dumped table '%-.64s'"
rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'"
serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela"
- slo "Failed rebuilding the index of dumped table '%-.64s'"
spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'"
- swe "Failed rebuilding the index of dumped table '%-.64s'"
ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'"
ER_MASTER
cze "Chyba masteru: '%-.64s'"
@@ -4219,7 +4202,6 @@ ER_TRANS_CACHE_FULL
ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare"
por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente"
rus "ôÒÁÎÚÁËÃÉÉ, ×ËÌÀÞÁÀÝÅÊ ÂÏÌØÛÏÅ ËÏÌÉÞÅÓÔ×Ï ËÏÍÁÎÄ, ÐÏÔÒÅÂÏ×ÁÌÏÓØ ÂÏÌÅÅ ÞÅÍ 'max_binlog_cache_size' ÂÁÊÔ. õ×ÅÌÉÞØÔÅ ÜÔÕ ÐÅÒÅÍÅÎÎÕÀ ÓÅÒ×ÅÒÁ mysqld É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ"
- serbian "Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server."
spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo"
swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt"
ukr "ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ ¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. ú¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ"
@@ -4232,8 +4214,7 @@ ER_SLAVE_MUST_STOP
ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE"
por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro"
rus "üÔÕ ÏÐÅÒÁÃÉÀ ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÐÒÉ ÒÁÂÏÔÁÀÝÅÍ ÐÏÔÏËÅ ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ STOP SLAVE"
- serbian "Ova operacija zahteva da je aktivan podreðeni server. Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'"
- slo "This operation cannot be performed with a running slave, run STOP SLAVE first"
+ serbian "Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server."
spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE"
swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först"
ukr "ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE"
@@ -4246,8 +4227,7 @@ ER_SLAVE_NOT_RUNNING
ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE"
por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE"
rus "äÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ ÔÒÅÂÕÅÔÓÑ ÒÁÂÏÔÁÀÝÉÊ ÐÏÄÞÉÎÅÎÎÙÊ ÓÅÒ×ÅÒ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ START SLAVE"
- serbian "Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
- slo "This operation requires a running slave, configure slave and do START SLAVE"
+ serbian "Ova operacija zahteva da je aktivan podreðeni server. Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'"
spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE"
swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE"
ukr "ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎƦÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE"
@@ -4260,25 +4240,15 @@ ER_BAD_SLAVE
ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO"
por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO"
rus "üÔÏÔ ÓÅÒ×ÅÒ ÎÅ ÎÁÓÔÒÏÅÎ ËÁË ÐÏÄÞÉÎÅÎÎÙÊ. ÷ÎÅÓÉÔÅ ÉÓÐÒÁ×ÌÅÎÉÑ × ËÏÎÆÉÇÕÒÁÃÉÏÎÎÏÍ ÆÁÊÌÅ ÉÌÉ Ó ÐÏÍÏÝØÀ CHANGE MASTER TO"
- serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
- slo "The server is not configured as slave, fix in config file or with CHANGE MASTER TO"
+ serbian "Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO"
swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO"
ukr "óÅÒ×ÅÒ ÎÅ ÚËÏÎƦÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎƦÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO"
ER_MASTER_INFO
- dan "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- nla "Could not initialize master info structure, more error messages can be found in the MySQL error log"
eng "Could not initialize master info structure; more error messages can be found in the MySQL error log"
fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL"
- ger "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- ita "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- por "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- rus "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- serbian "Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse"
- slo "Could not initialize master info structure, more error messages can be found in the MySQL error log"
- spa "Could not initialize master info structure, more error messages can be found in the MySQL error log"
+ serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information"
- ukr "Could not initialize master info structure, more error messages can be found in the MySQL error log"
ER_SLAVE_THREAD
dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
nla "Kon slave thread niet aanmaken, controleer systeem resources"
@@ -4288,8 +4258,8 @@ ER_SLAVE_THREAD
ita "Impossibile creare il thread 'slave', controllare le risorse di sistema"
por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema"
rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÐÏÔÏË ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. ðÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ"
- serbian "Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom"
slo "Could not create slave thread, check system resources"
+ serbian "Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse"
spa "No puedo crear el thread esclavo, verifique recursos del sistema"
swe "Kunde inte starta en tråd för replikering"
ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ"
@@ -4303,7 +4273,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000
ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
rus "õ ÐÏÌØÚÏ×ÁÔÅÌÑ %-.64s ÕÖÅ ÂÏÌØÛÅ ÞÅÍ 'max_user_connections' ÁËÔÉ×ÎÙÈ ÓÏÅÄÉÎÅÎÉÊ"
- serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
+ serbian "Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom"
spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas"
swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar"
ukr "ëÏÒÉÓÔÕ×ÁÞ %-.64s ×ÖÅ ÍÁ¤ ¦ÌØÛÅ Î¦Ö 'max_user_connections' ÁËÔÉ×ÎÉÈ Ú'¤ÄÎÁÎØ"
@@ -4317,7 +4287,7 @@ ER_SET_CONSTANTS_ONLY
ita "Si possono usare solo espressioni costanti con SET"
por "Você pode usar apenas expressões constantes com SET"
rus "÷Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ × SET ÔÏÌØËÏ ËÏÎÓÔÁÎÔÎÙÅ ×ÙÒÁÖÅÎÉÑ"
- serbian "Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju"
+ serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
spa "Tu solo debes usar expresiones constantes con SET"
swe "Man kan endast använda konstantuttryck med SET"
ukr "íÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ ×ÉÒÁÚÉ Ú¦ ÓÔÁÌÉÍÉ Õ SET"
@@ -4331,7 +4301,7 @@ ER_LOCK_WAIT_TIMEOUT
ita "E' scaduto il timeout per l'attesa del lock"
por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
rus "ôÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÂÌÏËÉÒÏ×ËÉ ÉÓÔÅË; ÐÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ"
- serbian "Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja"
+ serbian "Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju"
spa "Tiempo de bloqueo de espera excedido"
swe "Fick inte ett lås i tid ; Försök att starta om transaktionen"
ukr "úÁÔÒÉÍËÕ ÏÞ¦ËÕ×ÁÎÎÑ ÂÌÏËÕ×ÁÎÎÑ ×ÉÞÅÒÐÁÎÏ"
@@ -4345,7 +4315,7 @@ ER_LOCK_TABLE_FULL
ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
por "O número total de travamentos excede o tamanho da tabela de travamentos"
rus "ïÂÝÅÅ ËÏÌÉÞÅÓÔ×Ï ÂÌÏËÉÒÏ×ÏË ÐÒÅ×ÙÓÉÌÏ ÒÁÚÍÅÒÙ ÔÁÂÌÉÃÙ ÂÌÏËÉÒÏ×ÏË"
- serbian "Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
+ serbian "Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja"
spa "El número total de bloqueos excede el tamaño de bloqueo de la tabla"
swe "Antal lås överskrider antalet reserverade lås"
ukr "úÁÇÁÌØÎÁ ˦ÌØ˦ÓÔØ ÂÌÏËÕ×ÁÎØ ÐÅÒÅ×ÉÝÉÌÁ ÒÏÚÍ¦Ò ÂÌÏËÕ×ÁÎØ ÄÌÑ ÔÁÂÌÉæ"
@@ -4359,7 +4329,7 @@ ER_READ_ONLY_TRANSACTION 25000
ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'"
por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED"
rus "âÌÏËÉÒÏ×ËÉ ÏÂÎÏ×ÌÅÎÉÊ ÎÅÌØÚÑ ÐÏÌÕÞÉÔØ × ÐÒÏÃÅÓÓÅ ÞÔÅÎÉÑ ÎÅ ÐÒÉÎÑÔÏÊ (× ÒÅÖÉÍÅ READ UNCOMMITTED) ÔÒÁÎÚÁËÃÉÉ"
- serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka"
+ serbian "Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED"
swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED"
ukr "ïÎÏ×ÉÔÉ ÂÌÏËÕ×ÁÎÎÑ ÎÅ ÍÏÖÌÉ×Ï ÎÁ ÐÒÏÔÑÚ¦ ÔÒÁÎÚÁËæ§ READ UNCOMMITTED"
@@ -4373,7 +4343,7 @@ ER_DROP_DB_WITH_READ_LOCK
ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ"
- serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka"
+ serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka"
spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås"
ukr "DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ"
@@ -4387,7 +4357,7 @@ ER_CREATE_DB_WITH_READ_LOCK
ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ"
- serbian "Pogrešni argumenti prosleðeni na %s"
+ serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka"
spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås"
ukr "CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ"
@@ -4400,7 +4370,7 @@ ER_WRONG_ARGUMENTS
ita "Argomenti errati a %s"
por "Argumentos errados para %s"
rus "îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s"
- serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
+ serbian "Pogrešni argumenti prosleðeni na %s"
spa "Argumentos errados para %s"
swe "Felaktiga argument till %s"
ukr "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s"
@@ -4413,7 +4383,7 @@ ER_NO_PERMISSION_TO_CREATE_USER 42000
ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti"
por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários"
rus "'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ"
- serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
+ serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios"
swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare"
ukr "ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×"
@@ -4426,7 +4396,7 @@ ER_UNION_TABLES_IN_DIFFERENT_DIR
ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
rus "îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ"
- serbian "Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju"
+ serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos"
swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas"
ER_LOCK_DEADLOCK 40001
@@ -4438,10 +4408,9 @@ ER_LOCK_DEADLOCK 40001
ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
rus "÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ"
- serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
+ serbian "Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju"
spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen"
- ukr "Deadlock found when trying to get lock; Try restarting transaction"
ER_TABLE_CANT_HANDLE_FT
nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen"
eng "The used table type doesn't support FULLTEXT indexes"
@@ -4451,7 +4420,7 @@ ER_TABLE_CANT_HANDLE_FT
ita "La tabella usata non supporta gli indici FULLTEXT"
por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)"
rus "éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×"
- serbian "Ne mogu da dodam proveru spoljnog kljuèa"
+ serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
spa "El tipo de tabla usada no soporta índices FULLTEXT"
swe "Tabelltypen har inte hantering av FULLTEXT-index"
ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×"
@@ -4463,55 +4432,34 @@ ER_CANNOT_ADD_FOREIGN
ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)"
por "Não pode acrescentar uma restrição de chave estrangeira"
rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÏÇÒÁÎÉÞÅÎÉÑ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ"
- serbian "Ne mogu da dodam slog: provera spoljnog kljuèa je neuspela"
+ serbian "Ne mogu da dodam proveru spoljnog kljuèa"
spa "No puede adicionar clave extranjera constraint"
swe "Kan inte lägga till 'FOREIGN KEY constraint'"
ER_NO_REFERENCED_ROW 23000
- dan "Cannot add a child row: a foreign key constraint fails"
nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald"
eng "Cannot add or update a child row: a foreign key constraint fails"
- est "Cannot add a child row: a foreign key constraint fails"
fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche"
ger "Hinzufügen eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
greek "Cannot add a child row: a foreign key constraint fails"
hun "Cannot add a child row: a foreign key constraint fails"
ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
- jpn "Cannot add a child row: a foreign key constraint fails"
- kor "Cannot add a child row: a foreign key constraint fails"
- nor "Cannot add a child row: a foreign key constraint fails"
norwegian-ny "Cannot add a child row: a foreign key constraint fails"
- pol "Cannot add a child row: a foreign key constraint fails"
por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou"
- rum "Cannot add a child row: a foreign key constraint fails"
rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÄÏÞÅÒÎÀÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ"
- serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela"
- slo "Cannot add a child row: a foreign key constraint fails"
spa "No puede adicionar una línea hijo: falla de clave extranjera constraint"
swe "FOREIGN KEY-konflikt: Kan inte skriva barn"
- ukr "Cannot add a child row: a foreign key constraint fails"
ER_ROW_IS_REFERENCED 23000
- dan "Cannot delete a parent row: a foreign key constraint fails"
- nla "Kan bovenliggende rij nite verwijderen: foreign key beperking gefaald"
eng "Cannot delete or update a parent row: a foreign key constraint fails"
- est "Cannot delete a parent row: a foreign key constraint fails"
fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche"
ger "Löschen eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
greek "Cannot delete a parent row: a foreign key constraint fails"
hun "Cannot delete a parent row: a foreign key constraint fails"
ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
- jpn "Cannot delete a parent row: a foreign key constraint fails"
- kor "Cannot delete a parent row: a foreign key constraint fails"
- nor "Cannot delete a parent row: a foreign key constraint fails"
- norwegian-ny "Cannot delete a parent row: a foreign key constraint fails"
- pol "Cannot delete a parent row: a foreign key constraint fails"
por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou"
- rum "Cannot delete a parent row: a foreign key constraint fails"
rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÒÏÄÉÔÅÌØÓËÕÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ"
- serbian "Greška pri povezivanju sa glavnim serverom u klasteru: %-.128s"
- slo "Cannot delete a parent row: a foreign key constraint fails"
+ serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela"
spa "No puede deletar una línea padre: falla de clave extranjera constraint"
swe "FOREIGN KEY-konflikt: Kan inte radera fader"
- ukr "Cannot delete a parent row: a foreign key constraint fails"
ER_CONNECT_TO_MASTER 08S01
nla "Fout bij opbouwen verbinding naar master: %-.128s"
eng "Error connecting to master: %-.128s"
@@ -4519,7 +4467,6 @@ ER_CONNECT_TO_MASTER 08S01
ita "Errore durante la connessione al master: %-.128s"
por "Erro conectando com o master: %-.128s"
rus "ïÛÉÂËÁ ÓÏÅÄÉÎÅÎÉÑ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ: %-.128s"
- serbian "Greška pri izvršavanju upita na glavnom serveru u klasteru: %-.128s"
spa "Error de coneccion a master: %-.128s"
swe "Fick fel vid anslutning till master: %-.128s"
ER_QUERY_ON_MASTER
@@ -4529,7 +4476,6 @@ ER_QUERY_ON_MASTER
ita "Errore eseguendo una query sul master: %-.128s"
por "Erro rodando consulta no master: %-.128s"
rus "ïÛÉÂËÁ ×ÙÐÏÌÎÅÎÉÑ ÚÁÐÒÏÓÁ ÎÁ ÇÏÌÏ×ÎÏÍ ÓÅÒ×ÅÒÅ: %-.128s"
- serbian "Greška pri izvršavanju komande %s: %-.128s"
spa "Error executando el query en master: %-.128s"
swe "Fick fel vid utförande av command på mastern: %-.128s"
ER_ERROR_WHEN_EXECUTING_COMMAND
@@ -4540,7 +4486,7 @@ ER_ERROR_WHEN_EXECUTING_COMMAND
ita "Errore durante l'esecuzione del comando %s: %-.128s"
por "Erro quando executando comando %s: %-.128s"
rus "ïÛÉÂËÁ ÐÒÉ ×ÙÐÏÌÎÅÎÉÉ ËÏÍÁÎÄÙ %s: %-.128s"
- serbian "Pogrešna upotreba %s i %s"
+ serbian "Greška pri izvršavanju komande %s: %-.128s"
spa "Error de %s: %-.128s"
swe "Fick fel vid utförande av %s: %-.128s"
ER_WRONG_USAGE
@@ -4551,7 +4497,7 @@ ER_WRONG_USAGE
ita "Uso errato di %s e %s"
por "Uso errado de %s e %s"
rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ %s É %s"
- serbian "Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona"
+ serbian "Pogrešna upotreba %s i %s"
spa "Equivocado uso de %s y %s"
swe "Felaktig använding av %s and %s"
ukr "Wrong usage of %s and %s"
@@ -4563,7 +4509,7 @@ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000
ita "La SELECT utilizzata ha un numero di colonne differente"
por "Os comandos SELECT usados têm diferente número de colunas"
rus "éÓÐÏÌØÚÏ×ÁÎÎÙÅ ÏÐÅÒÁÔÏÒÙ ×ÙÂÏÒËÉ (SELECT) ÄÁÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×"
- serbian "Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka u konfliktu"
+ serbian "Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona"
spa "El comando SELECT usado tiene diferente número de columnas"
swe "SELECT-kommandona har olika antal kolumner"
ER_CANT_UPDATE_WITH_READLOCK
@@ -4574,7 +4520,7 @@ ER_CANT_UPDATE_WITH_READLOCK
ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura"
por "Não posso executar a consulta porque você tem um conflito de travamento de leitura"
rus "îÅ×ÏÚÍÏÖÎÏ ÉÓÐÏÌÎÉÔØ ÚÁÐÒÏÓ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÕÓÔÁÎÏ×ÌÅÎÙ ËÏÎÆÌÉËÔÕÀÝÉÅ ÂÌÏËÉÒÏ×ËÉ ÞÔÅÎÉÑ"
- serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno"
+ serbian "Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka u konfliktu"
spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura"
swe "Kan inte utföra kommandot emedan du har ett READ-lås"
ER_MIXING_NOT_ALLOWED
@@ -4585,7 +4531,7 @@ ER_MIXING_NOT_ALLOWED
ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali"
por "Mistura de tabelas transacional e não-transacional está desabilitada"
rus "éÓÐÏÌØÚÏ×ÁÎÉÅ ÔÒÁÎÚÁËÃÉÏÎÎÙÈ ÔÁÂÌÉà ÎÁÒÑÄÕ Ó ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍÉ ÚÁÐÒÅÝÅÎÏ"
- serbian "Opcija '%s' je upotrebljena dva puta u istom iskazu"
+ serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno"
spa "Mezla de transancional y no-transancional tablas está deshabilitada"
swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat"
ER_DUP_ARGUMENT
@@ -4596,7 +4542,6 @@ ER_DUP_ARGUMENT
ita "L'opzione '%s' e' stata usata due volte nel comando"
por "Opção '%s' usada duas vezes no comando"
rus "ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ"
- serbian "User '%-.64s' has exceeded the '%s' resource (current value: %ld)"
spa "Opción '%s' usada dos veces en el comando"
swe "Option '%s' användes två gånger"
ER_USER_LIMIT_REACHED 42000
@@ -4606,7 +4551,6 @@ ER_USER_LIMIT_REACHED 42000
ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)"
por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)"
rus "ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)"
- serbian "Access denied; you need the %-.128s privilege for this operation"
spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)"
swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)"
ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -4616,7 +4560,6 @@ ER_SPECIFIC_ACCESS_DENIED_ERROR
ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
rus "÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. ÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ"
- serbian "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL"
spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación"
swe "Du har inte privlegiet '%-.128s' som behövs för denna operation"
ukr "Access denied. You need the %-.128s privilege for this operation"
@@ -4627,7 +4570,6 @@ ER_LOCAL_VARIABLE
ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL"
por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL"
rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL"
- serbian "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL"
spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL"
swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL"
ER_GLOBAL_VARIABLE
@@ -4637,7 +4579,6 @@ ER_GLOBAL_VARIABLE
ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL"
por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL"
rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL"
- serbian "Variable '%-.64s' doesn't have a default value"
spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL"
swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL"
ER_NO_DEFAULT 42000
@@ -4647,7 +4588,6 @@ ER_NO_DEFAULT 42000
ita "La variabile '%-.64s' non ha un valore di default"
por "Variável '%-.64s' não tem um valor padrão"
rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ"
- serbian "Variable '%-.64s' can't be set to the value of '%-.64s'"
spa "Variable '%-.64s' no tiene un valor patrón"
swe "Variabel '%-.64s' har inte ett DEFAULT-värde"
ER_WRONG_VALUE_FOR_VAR 42000
@@ -4657,7 +4597,6 @@ ER_WRONG_VALUE_FOR_VAR 42000
ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'"
por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'"
rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'"
- serbian "Incorrect argument type to variable '%-.64s'"
spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'"
swe "Variabel '%-.64s' kan inte sättas till '%-.64s'"
ER_WRONG_TYPE_FOR_VAR 42000
@@ -4667,10 +4606,8 @@ ER_WRONG_TYPE_FOR_VAR 42000
ita "Tipo di valore errato per la variabile '%-.64s'"
por "Tipo errado de argumento para variável '%-.64s'"
rus "îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'"
- serbian "Variable '%-.64s' can only be set, not read"
spa "Tipo de argumento equivocado para variable '%-.64s'"
swe "Fel typ av argument till variabel '%-.64s'"
- ukr "Wrong argument type to variable '%-.64s'"
ER_VAR_CANT_BE_READ
nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen"
eng "Variable '%-.64s' can only be set, not read"
@@ -4678,7 +4615,6 @@ ER_VAR_CANT_BE_READ
ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto"
por "Variável '%-.64s' somente pode ser configurada, não lida"
rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ"
- serbian "Incorrect usage/placement of '%s'"
spa "Variable '%-.64s' solamente puede ser configurada, no leída"
swe "Variabeln '%-.64s' kan endast sättas, inte läsas"
ER_CANT_USE_OPTION_HERE 42000
@@ -4688,10 +4624,8 @@ ER_CANT_USE_OPTION_HERE 42000
ita "Uso/posizione di '%s' sbagliato"
por "Errado uso/colocação de '%s'"
rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'"
- serbian "This version of MySQL doesn't yet support '%s'"
spa "Equivocado uso/colocación de '%s'"
swe "Fel använding/placering av '%s'"
- ukr "Wrong usage/placement of '%s'"
ER_NOT_SUPPORTED_YET 42000
nla "Deze versie van MySQL ondersteunt nog geen '%s'"
eng "This version of MySQL doesn't yet support '%s'"
@@ -4699,7 +4633,6 @@ ER_NOT_SUPPORTED_YET 42000
ita "Questa versione di MySQL non supporta ancora '%s'"
por "Esta versão de MySQL não suporta ainda '%s'"
rus "üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'"
- serbian "Got fatal error %d: '%-.128s' from master when reading data from binary log"
spa "Esta versión de MySQL no soporta todavia '%s'"
swe "Denna version av MySQL kan ännu inte utföra '%s'"
ER_MASTER_FATAL_ERROR_READING_BINLOG
@@ -4709,14 +4642,12 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG
ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
rus "ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ"
- serbian "Slave SQL thread ignored the query because of replicate-*-table rules"
spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
ER_SLAVE_IGNORED_TABLE
eng "Slave SQL thread ignored the query because of replicate-*-table rules"
ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela"
- serbian "Variable '%-.64s' is a %s variable"
spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla"
swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel"
ER_INCORRECT_GLOBAL_LOCAL_VAR
@@ -4728,23 +4659,18 @@ ER_WRONG_FK_DEF 42000
eng "Incorrect foreign key definition for '%-.64s': %s"
ger "Falsche Fremdschlüssel-Definition für '%-64s': %s"
por "Definição errada da chave estrangeira para '%-.64s': %s"
- serbian "Key reference and table reference don't match"
spa "Equivocada definición de llave extranjera para '%-.64s': %s"
swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s"
- ukr "Wrong foreign key definition for '%-.64s': %s"
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF
eng "Key reference and table reference don't match"
ger "Schlüssel- und Tabellenverweis passen nicht zusammen"
por "Referência da chave e referência da tabela não coincidem"
- serbian "Operand should contain %d column(s)"
spa "Referencia de llave y referencia de tabla no coinciden"
swe "Nyckelreferensen och tabellreferensen stämmer inte överens"
- ukr "Key reference and table reference doesn't match"
ER_OPERAND_COLUMNS 21000
eng "Operand should contain %d column(s)"
ger "Operand solle %d Spalte(n) enthalten"
rus "ïÐÅÒÁÎÄ ÄÏÌÖÅÎ ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË"
- serbian "Subquery returns more than 1 row"
spa "Operando debe tener %d columna(s)"
ukr "ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×"
ER_SUBQUERY_NO_1_ROW 21000
@@ -4752,7 +4678,6 @@ ER_SUBQUERY_NO_1_ROW 21000
ger "Unterabfrage lieferte mehr als einen Datensatz zurück"
por "Subconsulta retorna mais que 1 registro"
rus "ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ"
- serbian "Unknown prepared statement handler (%ld) given to %s"
spa "Subconsulta retorna mas que 1 línea"
swe "Subquery returnerade mer än 1 rad"
ukr "ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ ¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ"
@@ -4761,7 +4686,6 @@ ER_UNKNOWN_STMT_HANDLER
eng "Unknown prepared statement handler (%.*s) given to %s"
ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben"
por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s"
- serbian "Help database is corrupt or does not exist"
spa "Desconocido preparado comando handler (%ld) dado para %s"
swe "Okänd PREPARED STATEMENT id (%ld) var given till %s"
ukr "Unknown prepared statement handler (%ld) given to %s"
@@ -4769,7 +4693,6 @@ ER_CORRUPT_HELP_DB
eng "Help database is corrupt or does not exist"
ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht"
por "Banco de dado de ajuda corrupto ou não existente"
- serbian "Cyclic reference on subqueries"
spa "Base de datos Help está corrupto o no existe"
swe "Hjälpdatabasen finns inte eller är skadad"
ER_CYCLIC_REFERENCE
@@ -4777,7 +4700,6 @@ ER_CYCLIC_REFERENCE
ger "Zyklischer Verweis in Unterabfragen"
por "Referência cíclica em subconsultas"
rus "ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ"
- serbian "Converting column '%s' from %s to %s"
spa "Cíclica referencia en subconsultas"
swe "Cyklisk referens i subqueries"
ukr "ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ"
@@ -4786,7 +4708,6 @@ ER_AUTO_CONVERT
ger "Spalte '%s' wird von %s nach %s umgewandelt"
por "Convertendo coluna '%s' de %s para %s"
rus "ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s"
- serbian "Reference '%-.64s' not supported (%s)"
spa "Convirtiendo columna '%s' de %s para %s"
swe "Konvertar kolumn '%s' från %s till %s"
ukr "ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s"
@@ -4795,7 +4716,6 @@ ER_ILLEGAL_REFERENCE 42S22
ger "Verweis '%-.64s' wird nicht unterstützt (%s)"
por "Referência '%-.64s' não suportada (%s)"
rus "óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)"
- serbian "Every derived table must have its own alias"
spa "Referencia '%-.64s' no soportada (%s)"
swe "Referens '%-.64s' stöds inte (%s)"
ukr "ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)"
@@ -4803,16 +4723,13 @@ ER_DERIVED_MUST_HAVE_ALIAS 42000
eng "Every derived table must have its own alias"
ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden"
por "Cada tabela derivada deve ter seu próprio alias"
- serbian "Select %u was reduced during optimization"
spa "Cada tabla derivada debe tener su propio alias"
swe "Varje 'derived table' måste ha sitt eget alias"
- ukr "Every derived table must have it's own alias"
ER_SELECT_REDUCED 01000
eng "Select %u was reduced during optimization"
ger "Select %u wurde während der Optimierung reduziert"
por "Select %u foi reduzido durante otimização"
rus "Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ"
- serbian "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s"
spa "Select %u fué reducido durante optimización"
swe "Select %u reducerades vid optimiering"
ukr "Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii"
@@ -4820,75 +4737,57 @@ ER_TABLENAME_NOT_ALLOWED_HERE 42000
eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s"
ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden"
por "Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s"
- serbian "Client does not support authentication protocol requested by server; consider upgrading MySQL client"
spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s"
swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s"
- ukr "Table '%-.64s' from one of SELECT's can not be used in %-.32s"
ER_NOT_SUPPORTED_AUTH_MODE 08004
eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client"
ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client"
por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL"
- serbian "All parts of a SPATIAL index must be NOT NULL"
spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL"
swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet."
ER_SPATIAL_CANT_HAVE_NULL 42000
eng "All parts of a SPATIAL index must be NOT NULL"
- ger "Alle Teile eines SPATIAL KEY müssen als NOT NULL deklariert sein"
- por "Todas as partes de uma SPATIAL KEY devem ser NOT NULL"
- serbian "COLLATION '%s' is not valid for CHARACTER SET '%s'"
- spa "Todas las partes de una SPATIAL KEY deben ser NOT NULL"
- swe "Alla delar av en SPATIAL KEY måste vara NOT NULL"
- ukr "All parts of a SPATIAL KEY must be NOT NULL"
+ ger "Alle Teile eines SPATIAL index müssen als NOT NULL deklariert sein"
+ por "Todas as partes de uma SPATIAL index devem ser NOT NULL"
+ spa "Todas las partes de una SPATIAL index deben ser NOT NULL"
+ swe "Alla delar av en SPATIAL index måste vara NOT NULL"
ER_COLLATION_CHARSET_MISMATCH 42000
eng "COLLATION '%s' is not valid for CHARACTER SET '%s'"
ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig"
por "COLLATION '%s' não é válida para CHARACTER SET '%s'"
- serbian "Slave is already running"
spa "COLLATION '%s' no es válido para CHARACTER SET '%s'"
swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'"
ER_SLAVE_WAS_RUNNING
eng "Slave is already running"
ger "Slave läuft bereits"
por "O slave já está rodando"
- serbian "Slave has already been stopped"
spa "Slave ya está funcionando"
swe "Slaven har redan startat"
ER_SLAVE_WAS_NOT_RUNNING
eng "Slave has already been stopped"
ger "Slave wurde bereits angehalten"
por "O slave já está parado"
- serbian "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
spa "Slave ya fué parado"
swe "Slaven har redan stoppat"
ER_TOO_BIG_FOR_UNCOMPRESS
eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d"
por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
- serbian "ZLIB: Not enough memory"
spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)"
- swe "Too big size of uncompressed data. The maximum size is %d. (probably, length of uncompressed data was corrupted)"
- ukr "Too big size of uncompressed data. The maximum size is %d. (probably, length of uncompressed data was corrupted)"
ER_ZLIB_Z_MEM_ERROR
eng "ZLIB: Not enough memory"
ger "ZLIB: Steht nicht genug Speicher zur Verfügung"
por "ZLIB: Não suficiente memória disponível"
- serbian "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
spa "Z_MEM_ERROR: No suficiente memoria para zlib"
- swe "Z_MEM_ERROR: Not enough memory available for zlib"
- ukr "Z_MEM_ERROR: Not enough memory available for zlib"
ER_ZLIB_Z_BUF_ERROR
eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
- serbian "ZLIB: Input data corrupted"
spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)"
- swe "Z_BUF_ERROR: Not enough room in the output buffer for zlib (probably, length of uncompressed data was corrupted)"
- ukr "Z_BUF_ERROR: Not enough room in the output buffer for zlib (probably, length of uncompressed data was corrupted)"
ER_ZLIB_Z_DATA_ERROR
eng "ZLIB: Input data corrupted"
ger "ZLIB: Eingabedaten beschädigt"
por "ZLIB: Dados de entrada está corrupto"
- serbian "%d line(s) were cut by GROUP_CONCAT()"
spa "Z_DATA_ERROR: Dato de entrada fué corrompido para zlib"
swe "Z_DATA_ERROR: Input data was corrupted for zlib"
ukr "Z_DATA_ERROR: Input data was corrupted for zlib"
@@ -4896,7 +4795,6 @@ ER_CUT_VALUE_GROUP_CONCAT
eng "%d line(s) were cut by GROUP_CONCAT()"
ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten"
por "%d linha(s) foram cortada(s) por GROUP_CONCAT()"
- serbian "Row %ld doesn't contain data for all columns"
spa "%d línea(s) fue(fueron) cortadas por group_concat()"
swe "%d rad(er) kapades av group_concat()"
ukr "%d line(s) was(were) cut by group_concat()"
@@ -4904,92 +4802,70 @@ ER_WARN_TOO_FEW_RECORDS 01000
eng "Row %ld doesn't contain data for all columns"
ger "Anzahl der Datensätze in Zeile %ld geringer als Anzahl der Spalten"
por "Conta de registro é menor que a conta de coluna na linha %ld"
- serbian "Row %ld was truncated; it contained more data than there were input columns"
spa "Línea %ld no contiene datos para todas las columnas"
ER_WARN_TOO_MANY_RECORDS 01000
eng "Row %ld was truncated; it contained more data than there were input columns"
ger "Anzahl der Datensätze in Zeile %ld größer als Anzahl der Spalten"
por "Conta de registro é maior que a conta de coluna na linha %ld"
- serbian "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld"
spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada"
- swe "Row %ld was truncated; It contained more data than there were input columns"
- ukr "Row %ld was truncated; It contained more data than there were input columns"
ER_WARN_NULL_TO_NOTNULL 22004
eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld"
ger "Daten abgeschnitten, NULL für NOT NULL-Spalte '%s' in Zeile %ld angegeben"
por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld"
- serbian "Out of range value adjusted for column '%s' at row %ld"
spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld"
- swe "Data truncated, NULL supplied to NOT NULL column '%s' at row %ld"
- ukr "Data truncated, NULL supplied to NOT NULL column '%s' at row %ld"
ER_WARN_DATA_OUT_OF_RANGE 22003
eng "Out of range value adjusted for column '%s' at row %ld"
ger "Daten abgeschnitten, außerhalb des Wertebereichs für Spalte '%s' in Zeile %ld"
por "Dado truncado, fora de alcance para coluna '%s' na linha %ld"
- serbian "Data truncated for column '%s' at row %ld"
spa "Datos truncados, fuera de gama para columna '%s' en la línea %ld"
- swe "Data truncated, out of range for column '%s' at row %ld"
- ukr "Data truncated, out of range for column '%s' at row %ld"
ER_WARN_DATA_TRUNCATED 01000
eng "Data truncated for column '%s' at row %ld"
ger "Daten abgeschnitten für Spalte '%s' in Zeile %ld"
por "Dado truncado para coluna '%s' na linha %ld"
- serbian "Using storage engine %s for table '%s'"
spa "Datos truncados para columna '%s' en la línea %ld"
ER_WARN_USING_OTHER_HANDLER
eng "Using storage engine %s for table '%s'"
ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt"
por "Usando engine de armazenamento %s para tabela '%s'"
- serbian "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
spa "Usando motor de almacenamiento %s para tabla '%s'"
swe "Använder handler %s för tabell '%s'"
ER_CANT_AGGREGATE_2COLLATIONS
eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
ger "Unerlaubte Vermischung der Kollationen (%s,%s) und (%s,%s) für die Operation '%s'"
por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'"
- serbian "Cannot drop one or more of the requested users"
spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'"
ER_DROP_USER
eng "Cannot drop one or more of the requested users"
ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen"
- serbian "Can't revoke all privileges, grant for one or more of the requested users"
ER_REVOKE_GRANTS
eng "Can't revoke all privileges, grant for one or more of the requested users"
ger "Kann nicht alle Berechtigungen widerrufen, grant for one or more of the requested users"
por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos"
- serbian "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados"
ER_CANT_AGGREGATE_3COLLATIONS
eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
ger "Unerlaubte Vermischung der Kollationen (%s,%s), (%s,%s), (%s,%s) für die Operation '%s'"
por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'"
- serbian "Illegal mix of collations for operation '%s'"
spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'"
ER_CANT_AGGREGATE_NCOLLATIONS
eng "Illegal mix of collations for operation '%s'"
ger "Unerlaubte Vermischung der Kollationen für die Operation '%s'"
por "Ilegal combinação de collations para operação '%s'"
- serbian "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
spa "Ilegal mezcla de collations para operación '%s'"
ER_VARIABLE_IS_NOT_STRUCT
eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
ger "Variable '%-.64s' ist keine Variablen-Komponenten (kann nicht als XXXX.variablen_name verwendet werden)"
por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)"
- serbian "Unknown collation: '%-.64s'"
spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)"
- swe "Variable '%-.64s' is not a variable component (Can't be used as XXXX.variable_name)"
- ukr "Variable '%-.64s' is not a variable component (Can't be used as XXXX.variable_name)"
ER_UNKNOWN_COLLATION
eng "Unknown collation: '%-.64s'"
ger "Unbekannte Kollation: '%-.64s'"
por "Collation desconhecida: '%-.64s'"
- serbian "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started"
spa "Collation desconocida: '%-.64s'"
ER_SLAVE_IGNORED_SSL_PARAMS
eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started"
ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn der MySQL-Slave mit SSL gestartet wird"
por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado."
- serbian "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"
spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado"
swe "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started"
ukr "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started"
@@ -4998,626 +4874,361 @@ ER_SERVER_IS_IN_SECURE_AUTH_MODE
ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern"
por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato"
rus "óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ"
- serbian "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d"
spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato"
ER_WARN_FIELD_RESOLVED
eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d"
ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst"
por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d"
rus "ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d"
- serbian "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d"
ukr "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d"
ER_BAD_SLAVE_UNTIL_COND
eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL"
por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL"
- serbian "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL"
- swe "Wrong parameter or combination of parameters for START SLAVE UNTIL"
- ukr "Wrong parameter or combination of parameters for START SLAVE UNTIL"
ER_MISSING_SKIP_SLAVE
- dan "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- nla "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart"
- est "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- fre "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn der Slave-Server unerwartet neu startet"
- greek "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- hun "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- ita "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- jpn "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- kor "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- nor "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- norwegian-ny "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- pol "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo"
- rum "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- rus "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
- serbian "SQL thread is not to be started so UNTIL options are ignored"
- slo "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart"
spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave"
- swe "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL, otherwise you are not safe in case of unexpected slave's mysqld restart"
- ukr "It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL, otherwise you are not safe in case of unexpected slave's mysqld restart"
ER_UNTIL_COND_IGNORED
eng "SQL thread is not to be started so UNTIL options are ignored"
ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert"
por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas"
- serbian "Incorrect index name '%-.100s'"
spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas"
ER_WRONG_NAME_FOR_INDEX 42000
eng "Incorrect index name '%-.100s'"
por "Incorreto nome de índice '%-.100s'"
- serbian "Incorrect catalog name '%-.100s'"
spa "Nombre de índice incorrecto '%-.100s'"
swe "Felaktigt index namn '%-.100s'"
ER_WRONG_NAME_FOR_CATALOG 42000
eng "Incorrect catalog name '%-.100s'"
por "Incorreto nome de catálogo '%-.100s'"
- serbian "Query cache failed to set size %lu, new query cache size is %lu"
spa "Nombre de catalog incorrecto '%-.100s'"
swe "Felaktigt katalog namn '%-.100s'"
ER_WARN_QC_RESIZE
- dan "Query cache failed to set size %lu, new query cache size is %lu"
- nla "Query cache failed to set size %lu, new query cache size is %lu"
eng "Query cache failed to set size %lu; new query cache size is %lu"
- est "Query cache failed to set size %lu, new query cache size is %lu"
- fre "Query cache failed to set size %lu, new query cache size is %lu"
- ger "Query cache failed to set size %lu, new query cache size is %lu"
- greek "Query cache failed to set size %lu, new query cache size is %lu"
- hun "Query cache failed to set size %lu, new query cache size is %lu"
- ita "Query cache failed to set size %lu, new query cache size is %lu"
- jpn "Query cache failed to set size %lu, new query cache size is %lu"
- kor "Query cache failed to set size %lu, new query cache size is %lu"
- nor "Query cache failed to set size %lu, new query cache size is %lu"
- norwegian-ny "Query cache failed to set size %lu, new query cache size is %lu"
- pol "Query cache failed to set size %lu, new query cache size is %lu"
por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu"
- rum "Query cache failed to set size %lu, new query cache size is %lu"
rus "ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu"
- serbian "Column '%-.64s' cannot be part of FULLTEXT index"
- slo "Query cache failed to set size %lu, new query cache size is %lu"
spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu"
swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu"
ukr "ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu"
ER_BAD_FT_COLUMN
eng "Column '%-.64s' cannot be part of FULLTEXT index"
por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT"
- serbian "Unknown key cache '%-.100s'"
spa "Columna '%-.64s' no puede ser parte de FULLTEXT index"
swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index"
ER_UNKNOWN_KEY_CACHE
eng "Unknown key cache '%-.100s'"
por "Key cache desconhecida '%-.100s'"
- serbian "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
spa "Desconocida key cache '%-.100s'"
swe "Okänd nyckel cache '%-.100s'"
ER_WARN_HOSTNAME_WONT_WORK
- dan "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- nla "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work"
- est "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- fre "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- ger "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- greek "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- hun "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- ita "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- jpn "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- kor "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- nor "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- norwegian-ny "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- pol "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar"
- rum "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- rus "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- serbian "Unknown table engine '%s'"
- slo "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar"
- swe "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
- ukr "MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work"
ER_UNKNOWN_STORAGE_ENGINE 42000
eng "Unknown table engine '%s'"
por "Motor de tabela desconhecido '%s'"
- serbian "'%s' is deprecated, use '%s' instead"
spa "Desconocido motor de tabla '%s'"
ER_WARN_DEPRECATED_SYNTAX
- dan "'%s' is deprecated, use '%s' instead"
- nla "'%s' is deprecated, use '%s' instead"
eng "'%s' is deprecated; use '%s' instead"
- est "'%s' is deprecated, use '%s' instead"
- fre "'%s' is deprecated, use '%s' instead"
- ger "'%s' is deprecated, use '%s' instead"
- greek "'%s' is deprecated, use '%s' instead"
- hun "'%s' is deprecated, use '%s' instead"
- ita "'%s' is deprecated, use '%s' instead"
- jpn "'%s' is deprecated, use '%s' instead"
- kor "'%s' is deprecated, use '%s' instead"
- nor "'%s' is deprecated, use '%s' instead"
- norwegian-ny "'%s' is deprecated, use '%s' instead"
- pol "'%s' is deprecated, use '%s' instead"
por "'%s' é desatualizado. Use '%s' em seu lugar"
- rum "'%s' is deprecated, use '%s' instead"
- rus "'%s' is deprecated, use '%s' instead"
- serbian "The target table %-.100s of the %s is not updatable"
- slo "'%s' is deprecated, use '%s' instead"
spa "'%s' está desaprobado, use '%s' en su lugar"
- swe "'%s' is deprecated, use '%s' instead"
- ukr "'%s' is deprecated, use '%s' instead"
ER_NON_UPDATABLE_TABLE
- dan "The target table %-.100s of the %s is not updateable"
- nla "The target table %-.100s of the %s is not updateable"
eng "The target table %-.100s of the %s is not updatable"
- est "The target table %-.100s of the %s is not updateable"
- fre "The target table %-.100s of the %s is not updateable"
- ger "The target table %-.100s of the %s is not updateable"
- greek "The target table %-.100s of the %s is not updateable"
- hun "The target table %-.100s of the %s is not updateable"
- ita "The target table %-.100s of the %s is not updateable"
- jpn "The target table %-.100s of the %s is not updateable"
- kor "The target table %-.100s of the %s is not updateable"
- nor "The target table %-.100s of the %s is not updateable"
- norwegian-ny "The target table %-.100s of the %s is not updateable"
- pol "The target table %-.100s of the %s is not updateable"
por "A tabela destino %-.100s do %s não é atualizável"
- rum "The target table %-.100s of the %s is not updateable"
rus "ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ"
- serbian "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- slo "The target table %-.100s of the %s is not updateable"
spa "La tabla destino %-.100s del %s no es actualizable"
swe "Tabel %-.100s använd med '%s' är inte uppdateringsbar"
ukr "ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ"
ER_FEATURE_DISABLED
- dan "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- nla "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working"
- est "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- fre "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- ger "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- greek "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- hun "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- ita "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- jpn "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- kor "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- nor "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- norwegian-ny "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- pol "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
- rum "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- rus "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
- serbian "The MySQL server is running with the %s option so it cannot execute this statement"
- slo "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
- ukr "The '%s' feature was disabled; you need MySQL built with '%s' to have it working"
ER_OPTION_PREVENTS_STATEMENT
eng "The MySQL server is running with the %s option so it cannot execute this statement"
por "O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando"
- serbian "Column '%-.100s' has duplicated value '%-.64s' in %s"
spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando"
swe "MySQL är startad med --skip-grant-tables. Pga av detta kan du inte använda detta kommando"
ER_DUPLICATED_VALUE_IN_TYPE
eng "Column '%-.100s' has duplicated value '%-.64s' in %s"
por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s"
- serbian "Truncated wrong %-.32s value: '%-.128s'"
spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s"
ER_TRUNCATED_WRONG_VALUE 22007
- dan "Truncated wrong %-.32s value: '%-.128s'"
- nla "Truncated wrong %-.32s value: '%-.128s'"
eng "Truncated incorrect %-.32s value: '%-.128s'"
- est "Truncated wrong %-.32s value: '%-.128s'"
- fre "Truncated wrong %-.32s value: '%-.128s'"
- ger "Truncated wrong %-.32s value: '%-.128s'"
- greek "Truncated wrong %-.32s value: '%-.128s'"
- hun "Truncated wrong %-.32s value: '%-.128s'"
- ita "Truncated wrong %-.32s value: '%-.128s'"
- jpn "Truncated wrong %-.32s value: '%-.128s'"
- kor "Truncated wrong %-.32s value: '%-.128s'"
- nor "Truncated wrong %-.32s value: '%-.128s'"
- norwegian-ny "Truncated wrong %-.32s value: '%-.128s'"
- pol "Truncated wrong %-.32s value: '%-.128s'"
por "Truncado errado %-.32s valor: '%-.128s'"
- rum "Truncated wrong %-.32s value: '%-.128s'"
- rus "Truncated wrong %-.32s value: '%-.128s'"
- serbian "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
- slo "Truncated wrong %-.32s value: '%-.128s'"
spa "Equivocado truncado %-.32s valor: '%-.128s'"
- swe "Truncated wrong %-.32s value: '%-.128s'"
- ukr "Truncated wrong %-.32s value: '%-.128s'"
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS
eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
- jpn "Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula"
- serbian "Invalid ON UPDATE clause for '%-.64s' column"
spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula"
swe "Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
ukr "Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
ER_INVALID_ON_UPDATE
eng "Invalid ON UPDATE clause for '%-.64s' column"
por "Inválida cláusula ON UPDATE para campo '%-.64s'"
- serbian "This command is not supported in the prepared statement protocol yet"
spa "Inválido ON UPDATE cláusula para campo '%-.64s'"
swe "Invalid ON UPDATE clause for '%-.64s' field"
ukr "Invalid ON UPDATE clause for '%-.64s' field"
ER_UNSUPPORTED_PS
eng "This command is not supported in the prepared statement protocol yet"
- serbian "Got error %d '%-.100s' from %s"
ER_GET_ERRMSG
dan "Modtog fejl %d '%-.100s' fra %s"
eng "Got error %d '%-.100s' from %s"
jpn "Got NDB error %d '%-.100s'"
nor "Mottok feil %d '%-.100s' fa %s"
norwegian-ny "Mottok feil %d '%-.100s' fra %s"
- serbian "Got temporary error %d '%-.100s' from %s"
ER_GET_TEMPORARY_ERRMSG
dan "Modtog temporary fejl %d '%-.100s' fra %s"
eng "Got temporary error %d '%-.100s' from %s"
jpn "Got temporary NDB error %d '%-.100s'"
nor "Mottok temporary feil %d '%-.100s' fra %s"
norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s"
- serbian "Unknown or incorrect time zone: '%-.64s'"
ER_UNKNOWN_TIME_ZONE
eng "Unknown or incorrect time zone: '%-.64s'"
- serbian "Invalid TIMESTAMP value in column '%s' at row %ld"
ER_WARN_INVALID_TIMESTAMP
eng "Invalid TIMESTAMP value in column '%s' at row %ld"
- serbian "Invalid %s character string: '%.64s'"
ER_INVALID_CHARACTER_STRING
eng "Invalid %s character string: '%.64s'"
- serbian "Result of %s() was larger than max_allowed_packet (%ld) - truncated"
ER_WARN_ALLOWED_PACKET_OVERFLOWED
eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated"
- serbian "Conflicting declarations: '%s%s' and '%s%s'"
ER_CONFLICTING_DECLARATIONS
eng "Conflicting declarations: '%s%s' and '%s%s'"
- serbian "Can't create a %s from within another stored routine"
ER_SP_NO_RECURSIVE_CREATE 2F003
eng "Can't create a %s from within another stored routine"
- serbian "%s %s already exists"
ER_SP_ALREADY_EXISTS 42000
eng "%s %s already exists"
- serbian "%s %s does not exist"
ER_SP_DOES_NOT_EXIST 42000
eng "%s %s does not exist"
- serbian "Failed to DROP %s %s"
ER_SP_DROP_FAILED
eng "Failed to DROP %s %s"
- serbian "Failed to CREATE %s %s"
ER_SP_STORE_FAILED
eng "Failed to CREATE %s %s"
- serbian "%s with no matching label: %s"
ER_SP_LILABEL_MISMATCH 42000
eng "%s with no matching label: %s"
- serbian "Redefining label %s"
ER_SP_LABEL_REDEFINE 42000
eng "Redefining label %s"
- serbian "End-label %s without match"
ER_SP_LABEL_MISMATCH 42000
eng "End-label %s without match"
- serbian "Referring to uninitialized variable %s"
ER_SP_UNINIT_VAR 01000
eng "Referring to uninitialized variable %s"
- serbian "SELECT in a stored procedure must have INTO"
ER_SP_BADSELECT 0A000
eng "SELECT in a stored procedure must have INTO"
- serbian "RETURN is only allowed in a FUNCTION"
ER_SP_BADRETURN 42000
eng "RETURN is only allowed in a FUNCTION"
- serbian "Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION"
ER_SP_BADSTATEMENT 0A000
eng "Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION"
- serbian "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored"
ER_UPDATE_LOG_DEPRECATED_IGNORED 42000
eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored"
- serbian "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000
eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
- rus "The update log is deprecated and replaced by the binary log SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
- serbian "Query execution was interrupted"
ER_QUERY_INTERRUPTED 70100
eng "Query execution was interrupted"
- serbian "Incorrect number of arguments for %s %s; expected %u, got %u"
ER_SP_WRONG_NO_OF_ARGS 42000
eng "Incorrect number of arguments for %s %s; expected %u, got %u"
- ger "Incorrect number of arguments for %s %s, expected %u, got %u"
- serbian "Undefined CONDITION: %s"
ER_SP_COND_MISMATCH 42000
eng "Undefined CONDITION: %s"
- serbian "No RETURN found in FUNCTION %s"
ER_SP_NORETURN 42000
eng "No RETURN found in FUNCTION %s"
- serbian "FUNCTION %s ended without RETURN"
ER_SP_NORETURNEND 2F005
eng "FUNCTION %s ended without RETURN"
- serbian "Cursor statement must be a SELECT"
ER_SP_BAD_CURSOR_QUERY 42000
eng "Cursor statement must be a SELECT"
- serbian "Cursor SELECT must not have INTO"
ER_SP_BAD_CURSOR_SELECT 42000
eng "Cursor SELECT must not have INTO"
- serbian "Undefined CURSOR: %s"
ER_SP_CURSOR_MISMATCH 42000
eng "Undefined CURSOR: %s"
- serbian "Cursor is already open"
ER_SP_CURSOR_ALREADY_OPEN 24000
eng "Cursor is already open"
- serbian "Cursor is not open"
ER_SP_CURSOR_NOT_OPEN 24000
eng "Cursor is not open"
- serbian "Undeclared variable: %s"
ER_SP_UNDECLARED_VAR 42000
eng "Undeclared variable: %s"
- serbian "Incorrect number of FETCH variables"
ER_SP_WRONG_NO_OF_FETCH_ARGS
eng "Incorrect number of FETCH variables"
- serbian "No data to FETCH"
ER_SP_FETCH_NO_DATA 02000
eng "No data to FETCH"
- serbian "Duplicate parameter: %s"
ER_SP_DUP_PARAM 42000
eng "Duplicate parameter: %s"
- serbian "Duplicate variable: %s"
ER_SP_DUP_VAR 42000
eng "Duplicate variable: %s"
- serbian "Duplicate condition: %s"
ER_SP_DUP_COND 42000
eng "Duplicate condition: %s"
- serbian "Duplicate cursor: %s"
ER_SP_DUP_CURS 42000
eng "Duplicate cursor: %s"
- serbian "Failed to ALTER %s %s"
ER_SP_CANT_ALTER
eng "Failed to ALTER %s %s"
- serbian "Subselect value not supported"
ER_SP_SUBSELECT_NYI 0A000
eng "Subselect value not supported"
- serbian "USE is not allowed in a stored procedure"
ER_SP_NO_USE 42000
eng "USE is not allowed in a stored procedure"
- serbian "Variable or condition declaration after cursor or handler declaration"
ER_SP_VARCOND_AFTER_CURSHNDLR 42000
eng "Variable or condition declaration after cursor or handler declaration"
- serbian "Cursor declaration after handler declaration"
ER_SP_CURSOR_AFTER_HANDLER 42000
eng "Cursor declaration after handler declaration"
- serbian "Case not found for CASE statement"
ER_SP_CASE_NOT_FOUND 20000
eng "Case not found for CASE statement"
- serbian "Configuration file '%-.64s' is too big"
ER_FPARSER_TOO_BIG_FILE
eng "Configuration file '%-.64s' is too big"
rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'"
- serbian "Malformed file type header in file '%-.64s'"
ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'"
ER_FPARSER_BAD_HEADER
eng "Malformed file type header in file '%-.64s'"
rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'"
- serbian "Unexpected end of file while parsing comment '%-.64s'"
ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'"
ER_FPARSER_EOF_IN_COMMENT
eng "Unexpected end of file while parsing comment '%-.64s'"
rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'"
- serbian "Error while parsing parameter '%-.64s' (line: '%-.64s')"
ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'"
ER_FPARSER_ERROR_IN_PARAMETER
eng "Error while parsing parameter '%-.64s' (line: '%-.64s')"
rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')"
- serbian "Unexpected end of file while skipping unknown parameter '%-.64s'"
ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')"
ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER
eng "Unexpected end of file while skipping unknown parameter '%-.64s'"
rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'"
- serbian "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table"
ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'"
ER_VIEW_NO_EXPLAIN
eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table"
rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ"
- serbian "File '%-.64s' has unknown type '%-.64s' in its header"
ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ"
ER_FRM_UNKNOWN_TYPE
eng "File '%-.64s' has unknown type '%-.64s' in its header"
rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ"
- serbian "'%-.64s.%-.64s' is not %s"
ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ"
ER_WRONG_OBJECT
eng "'%-.64s.%-.64s' is not %s"
rus "'%-.64s.%-.64s' - ÎÅ %s"
- serbian "Column '%-.64s' is not updatable"
ukr "'%-.64s.%-.64s' ÎÅ ¤ %s"
ER_NONUPDATEABLE_COLUMN
eng "Column '%-.64s' is not updatable"
rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ"
- serbian "View's SELECT contains a subquery in the FROM clause"
ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ"
ER_VIEW_SELECT_DERIVED
eng "View's SELECT contains a subquery in the FROM clause"
rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÏÄÚÁÐÒÏÓ × ËÏÎÓÔÒÕËÃÉÉ FROM"
- serbian "View's SELECT contains a '%s' clause"
ukr "View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM"
ER_VIEW_SELECT_CLAUSE
eng "View's SELECT contains a '%s' clause"
rus "View SELECT ÓÏÄÅÒÖÉÔ ËÏÎÓÔÒÕËÃÉÀ '%s'"
- serbian "View's SELECT contains a variable or parameter"
ukr "View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'"
ER_VIEW_SELECT_VARIABLE
eng "View's SELECT contains a variable or parameter"
rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ"
- serbian "View's SELECT contains a temporary table '%-.64s'"
ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ"
ER_VIEW_SELECT_TMPTABLE
eng "View's SELECT contains a temporary table '%-.64s'"
rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'"
- serbian "View's SELECT and view's field list have different column counts"
ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'"
ER_VIEW_WRONG_LIST
eng "View's SELECT and view's field list have different column counts"
rus "View SELECT É ÓÐÉÓÏË ÐÏÌÅÊ view ÉÍÅÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×"
- serbian "View merge algorithm can't be used here for now (assumed undefined algorithm)"
ukr "View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØ˦ÓÔØ ÓËÏ×Âæ×"
ER_WARN_VIEW_MERGE
eng "View merge algorithm can't be used here for now (assumed undefined algorithm)"
rus "áÌÇÏÒÉÔÍ ÓÌÉÑÎÉÑ view ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ÓÅÊÞÁÓ (ÁÌÇÏÒÉÔÍ ÂÕÄÅÔ ÎÅÏÐÅÒÅÄÅÌÅÎÎÙÍ)"
- serbian "View being updated does not have complete key of underlying table in it"
ukr "áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)"
ER_WARN_VIEW_WITHOUT_KEY
eng "View being updated does not have complete key of underlying table in it"
rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)"
- serbian "View '%-.64s.%-.64s' references invalid table(s) or column(s)"
ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ"
ER_VIEW_INVALID
- eng "View '%-.64s.%-.64s' references invalid table(s) or column(s)"
- rus "View '%-.64s.%-.64s' ÓÓÙÌÁÅÔÓÑ ÎÁ ÎÅÓÕÝÅÓÔ×ÕÀÝÉÅ ÔÁÂÌÉÃÙ ÉÌÉ ÓÔÏÌÂÃÙ"
- serbian "Can't drop a %s from within another stored routine"
- ukr "View '%-.64s.%-.64s' ÐÏÓÉÌÁ¤ÔÓÑ ÎÁ ÎŦÓÎÕÀÞ¦ ÔÁÂÌÉæ ÁÂÏ ÓÔÏ×Âæ"
+ eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s)"
+ rus "View '%-.64s.%-.64s' ÓÓÙÌÁÅÔÓÑ ÎÁ ÎÅÓÕÝÅÓÔ×ÕÀÝÉÅ ÔÁÂÌÉÃÙ ÉÌÉ ÓÔÏÌÂÃÙ ÉÌÉ ÆÕÎËÃÉÉ"
ER_SP_NO_DROP_SP
eng "Can't drop a %s from within another stored routine"
- serbian "GOTO is not allowed in a stored procedure handler"
ER_SP_GOTO_IN_HNDLR
eng "GOTO is not allowed in a stored procedure handler"
- serbian "Trigger already exists"
ER_TRG_ALREADY_EXISTS
eng "Trigger already exists"
- serbian "Trigger does not exist"
ER_TRG_DOES_NOT_EXIST
eng "Trigger does not exist"
- serbian "Trigger's '%-.64s' is view or temporary table"
ER_TRG_ON_VIEW_OR_TEMP_TABLE
eng "Trigger's '%-.64s' is view or temporary table"
- serbian "Updating of %s row is not allowed in %strigger"
ER_TRG_CANT_CHANGE_ROW
eng "Updating of %s row is not allowed in %strigger"
- serbian "There is no %s row in %s trigger"
ER_TRG_NO_SUCH_ROW_IN_TRG
eng "There is no %s row in %s trigger"
- serbian "Field '%-.64s' doesn't have a default value"
ER_NO_DEFAULT_FOR_FIELD
eng "Field '%-.64s' doesn't have a default value"
- serbian "Division by 0"
ER_DIVISION_BY_ZERO 22012
eng "Division by 0"
- serbian "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld"
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD
eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld"
- serbian "Illegal %s '%-.64s' value found during parsing"
ER_ILLEGAL_VALUE_FOR_TYPE 22007
eng "Illegal %s '%-.64s' value found during parsing"
- serbian "CHECK OPTION on non-updatable view '%-.64s.%-.64s'"
ER_VIEW_NONUPD_CHECK
eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'"
rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'"
- serbian "CHECK OPTION failed '%-.64s.%-.64s'"
ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ"
ER_VIEW_CHECK_FAILED
eng "CHECK OPTION failed '%-.64s.%-.64s'"
rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ"
- serbian "Access denied; you are not the procedure/function definer of '%s'"
ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ"
ER_SP_ACCESS_DENIED_ERROR 42000
eng "Access denied; you are not the procedure/function definer of '%s'"
- serbian "Failed purging old relay logs: %s"
ER_RELAY_LOG_FAIL
eng "Failed purging old relay logs: %s"
- serbian "Password hash should be a %d-digit hexadecimal number"
ER_PASSWD_LENGTH
eng "Password hash should be a %d-digit hexadecimal number"
- serbian "Target log not found in binlog index"
ER_UNKNOWN_TARGET_BINLOG
eng "Target log not found in binlog index"
- serbian "I/O error reading log index file"
ER_IO_ERR_LOG_INDEX_READ
eng "I/O error reading log index file"
- serbian "Server configuration does not permit binlog purge"
ER_BINLOG_PURGE_PROHIBITED
eng "Server configuration does not permit binlog purge"
- serbian "Failed on fseek()"
ER_FSEEK_FAIL
eng "Failed on fseek()"
- serbian "Fatal error during log purge"
ER_BINLOG_PURGE_FATAL_ERR
eng "Fatal error during log purge"
- serbian "A purgeable log is in use, will not purge"
ER_LOG_IN_USE
eng "A purgeable log is in use, will not purge"
- serbian "Unknown error during log purge"
ER_LOG_PURGE_UNKNOWN_ERR
eng "Unknown error during log purge"
- serbian "Failed initializing relay log position: %s"
ER_RELAY_LOG_INIT
eng "Failed initializing relay log position: %s"
- serbian "You are not using binary logging"
ER_NO_BINARY_LOGGING
eng "You are not using binary logging"
- serbian "The '%-.64s' syntax is reserved for purposes internal to the MySQL server"
ER_RESERVED_SYNTAX
eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server"
- serbian "WSAStartup Failed"
ER_WSAS_FAILED
eng "WSAStartup Failed"
- serbian "Can't handle procedures with differents groups yet"
ER_DIFF_GROUPS_PROC
eng "Can't handle procedures with differents groups yet"
- serbian "Select must have a group with this procedure"
ER_NO_GROUP_FOR_PROC
eng "Select must have a group with this procedure"
- serbian "Can't use ORDER clause with this procedure"
ER_ORDER_WITH_PROC
eng "Can't use ORDER clause with this procedure"
- serbian "Binary logging and replication forbid changing the global server %s"
ER_LOGING_PROHIBIT_CHANGING_OF
eng "Binary logging and replication forbid changing the global server %s"
- serbian "Can't map file: %-.64s, errno: %d"
ER_NO_FILE_MAPPING
eng "Can't map file: %-.64s, errno: %d"
- serbian "Wrong magic in %-.64s"
ER_WRONG_MAGIC
eng "Wrong magic in %-.64s"
- serbian "Prepared statement contains too many placeholders"
ER_PS_MANY_PARAM
eng "Prepared statement contains too many placeholders"
- serbian "Key part '%-.64s' length cannot be 0"
ER_KEY_PART_0
eng "Key part '%-.64s' length cannot be 0"
- serbian "View text checksum failed"
ER_VIEW_CHECKSUM
eng "View text checksum failed"
rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ"
- serbian "Can not modify more than one base table through a join view '%-.64s.%-.64s'"
ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ"
ER_VIEW_MULTIUPDATE
eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'"
rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'"
- serbian "Can not insert into join view '%-.64s.%-.64s' without fields list"
ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
ER_VIEW_NO_INSERT_FIELD_LIST
eng "Can not insert into join view '%-.64s.%-.64s' without fields list"
rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ"
- serbian "Can not delete from join view '%-.64s.%-.64s'"
ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×"
ER_VIEW_DELETE_MERGE_VIEW
eng "Can not delete from join view '%-.64s.%-.64s'"
rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'"
- serbian "Operation %s failed for '%.256s'"
ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
ER_CANNOT_USER
- cze "Operation %s failed for '%.256s'"
- dan "Operation %s failed for '%.256s'"
- nla "Operation %s failed for '%.256s'"
eng "Operation %s failed for %.256s"
- est "Operation %s failed for '%.256s'"
- fre "Operation %s failed for '%.256s'"
ger "Das Kommando %s scheiterte für %.256s"
- greek "Operation %s failed for '%.256s'"
- hun "Operation %s failed for '%.256s'"
- ita "Operation %s failed for '%.256s'"
- jpn "Operation %s failed for '%.256s'"
- kor "Operation %s failed for '%.256s'"
- nor "Operation %s failed for '%.256s'"
norwegian-ny "Operation %s failed for '%.256s'"
- pol "Operation %s failed for '%.256s'"
- por "Operation %s failed for '%.256s'"
- rum "Operation %s failed for '%.256s'"
- rus "Operation %s failed for '%.256s'"
- serbian ""
- slo "Operation %s failed for '%.256s'"
- spa "Operation %s failed for '%.256s'"
- swe "Operation %s failed for '%.256s'"
- ukr "Operation %s failed for '%.256s'"
diff --git a/sql/sp.cc b/sql/sp.cc
index 9eff1655711..4605d49f3ab 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -981,6 +981,7 @@ sp_cache_functions(THD *thd, LEX *lex)
thd->lex= newlex;
newlex->proc_table= oldlex->proc_table; // hint if mysql.oper is opened
+ newlex->current_select= NULL;
name.m_name.str= strchr(name.m_qname.str, '.');
name.m_db.length= name.m_name.str - name.m_qname.str;
name.m_db.str= strmake_root(thd->mem_root,
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index b4a2f368bc2..2500769ee30 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2467,6 +2467,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
bool found_unaliased_non_uniq= 0;
uint unaliased_counter;
+ LINT_INIT(unaliased_counter); // Dependent on found_unaliased
+
*unaliased= FALSE;
if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM)
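LINT_INIT marks a variable whose initialization is guarded by another flag (unaliased_counter is only read once found_unaliased is set), silencing "may be used uninitialized" warnings in checking builds. A sketch of how such a macro is conventionally defined; the server's real definition lives in its global headers and may differ:

    /* Dummy initialization, compiled in only for lint/valgrind builds */
    #ifdef HAVE_purify
    #define LINT_INIT(var) var= 0
    #else
    #define LINT_INIT(var)
    #endif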
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 69511018880..1d4b911bb65 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -140,6 +140,18 @@ int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list)
derived_result->set_table(table);
exit:
+ /* Hide "Unknown column" or "Unknown function" error */
+ if (orig_table_list->view)
+ {
+ if (thd->net.last_errno == ER_BAD_FIELD_ERROR ||
+ thd->net.last_errno == ER_SP_DOES_NOT_EXIST)
+ {
+ thd->clear_error();
+ my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db,
+ orig_table_list->real_name);
+ }
+ }
+
/*
if it is preparation PS only or commands that need only VIEW structure
then we do not need real data and we can skip execution (and parameters
@@ -256,13 +268,8 @@ int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list)
unit->cleanup();
}
else
- {
- free_tmp_table(thd, table);
unit->cleanup();
- }
lex->current_select= save_current_select;
- if (res)
- free_tmp_table(thd, table);
}
return res;
}
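The first hunk gives views a friendlier failure mode: while preparing a derived table that belongs to a view, a low-level "unknown column" or "unknown function" error is replaced by ER_VIEW_INVALID naming the view itself. The same remapping appears in the table.cc hunk near the end of this patch. Condensed to its essentials (names as in the hunk, surrounding function elided):

    if (thd->net.last_errno == ER_BAD_FIELD_ERROR ||
        thd->net.last_errno == ER_SP_DOES_NOT_EXIST)
    {
      thd->clear_error();                          /* drop the low-level error */
      my_error(ER_VIEW_INVALID, MYF(0), db, name); /* report the view instead */
    }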
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 12e4d912f15..2205ec504e9 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -567,8 +567,12 @@ int yylex(void *arg, void *yythd)
state= MY_LEX_HEX_NUMBER;
break;
}
- /* Fall through */
- case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling
+ case MY_LEX_IDENT_OR_BIN:
+ if (yyPeek() == '\'')
+ { // Found b'bin-number'
+ state= MY_LEX_BIN_NUMBER;
+ break;
+ }
case MY_LEX_IDENT:
uchar *start;
#if defined(USE_MB) && defined(USE_MB_IDENT)
@@ -689,6 +693,20 @@ int yylex(void *arg, void *yythd)
}
yyUnget();
}
+ else if (c == 'b' && (lex->ptr - lex->tok_start) == 2 &&
+ lex->tok_start[0] == '0' )
+ { // b'bin-number'
+ while (my_isxdigit(cs,(c = yyGet()))) ;
+ if ((lex->ptr - lex->tok_start) >= 4 && !ident_map[c])
+ {
+ yylval->lex_str= get_token(lex, yyLength());
+ yylval->lex_str.str+= 2; // Skip 0b
+ yylval->lex_str.length-= 2;
+ lex->yytoklen-= 2;
+ return (BIN_NUM);
+ }
+ yyUnget();
+ }
// fall through
case MY_LEX_IDENT_START: // We come here after '.'
result_state= IDENT;
@@ -801,6 +819,19 @@ int yylex(void *arg, void *yythd)
lex->yytoklen-=3;
return (HEX_NUM);
+ case MY_LEX_BIN_NUMBER: // Found b'bin-string'
+ yyGet(); // Skip '
+ while ((c= yyGet()) == '0' || c == '1');
+ length= (lex->ptr - lex->tok_start); // Length of bin-num + 3
+ if (c != '\'')
+ return(ABORT_SYM); // Illegal binary constant
+ yyGet(); // get_token makes an unget
+ yylval->lex_str= get_token(lex, length);
+ yylval->lex_str.str+= 2; // Skip b'
+ yylval->lex_str.length-= 3; // Don't count b' and last '
+ lex->yytoklen-= 3;
+ return (BIN_NUM);
+
case MY_LEX_CMP_OP: // Incomplete comparison operator
if (state_map[yyPeek()] == MY_LEX_CMP_OP ||
state_map[yyPeek()] == MY_LEX_LONG_CMP_OP)
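These lexer hunks add binary literals in two spellings: quoted b'1010' via the new MY_LEX_BIN_NUMBER state, and 0b1010 handled beside the existing 0x path. Both reach the parser as BIN_NUM with the delimiters stripped. One asymmetry is visible above: the 0b branch scans with my_isxdigit(), so it consumes any hex digit, while the quoted form accepts only '0' and '1'. A standalone sketch of the quoted scan (hypothetical helper, not server code):

    #include <stddef.h>

    /* Scan s at a b'...' literal; return the digit count, or -1 if malformed.
       On success, *digits points at the first 0/1 inside the quotes. */
    static int scan_bin_literal(const char *s, const char **digits)
    {
      size_t n= 0;
      if (s[0] != 'b' || s[1] != '\'')
        return -1;
      s+= 2;
      while (s[n] == '0' || s[n] == '1')
        n++;
      if (n == 0 || s[n] != '\'')
        return -1;                      /* empty or unterminated literal */
      *digits= s;
      return (int) n;
    }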
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 50c346619ee..01e80653372 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1583,6 +1583,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
packet, (uint) (pend-packet), thd->charset());
table_list.alias= table_list.real_name= conv_name.str;
packet= pend+1;
+
+ if (!my_strcasecmp(system_charset_info, table_list.db,
+ information_schema_name.str))
+ {
+ ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias);
+ if (schema_table)
+ table_list.schema_table= schema_table;
+ }
+
/* command not cachable => no gap for data base name */
if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1)))
break;
@@ -4912,11 +4921,9 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
break;
case MYSQL_TYPE_VARCHAR:
/*
- We can't use pack_length as this includes the field length
Long VARCHAR's are automatically converted to blobs in mysql_prepare_table
if they don't have a default value
*/
- new_field->key_length= new_field->length;
max_field_charlength= MAX_FIELD_VARCHARLENGTH;
break;
case MYSQL_TYPE_STRING:
@@ -5084,6 +5091,19 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
case MYSQL_TYPE_VAR_STRING:
DBUG_ASSERT(0); // Impossible
break;
+ case MYSQL_TYPE_BIT:
+ {
+ if (!length)
+ new_field->length= 1;
+ if (new_field->length > MAX_BIT_FIELD_LENGTH)
+ {
+ my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name,
+ MAX_BIT_FIELD_LENGTH);
+ DBUG_RETURN(1);
+ }
+ new_field->pack_length= (new_field->length + 7) / 8;
+ break;
+ }
}
if (!(new_field->flags & BLOB_FLAG) &&
@@ -5104,16 +5124,12 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name);
DBUG_RETURN(1);
}
- if (!new_field->pack_length)
- new_field->pack_length= calc_pack_length(new_field->sql_type,
- new_field->length);
- if (!new_field->key_length)
- new_field->key_length= new_field->pack_length;
lex->create_list.push_back(new_field);
lex->last_field=new_field;
DBUG_RETURN(0);
}
+
/* Store position for column in ALTER TABLE .. ADD column */
void store_position_for_column(const char *name)
@@ -5279,7 +5295,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
information_schema_name.str))
{
ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->real_name);
- if (!schema_table)
+ if (!schema_table ||
+ (schema_table->hidden &&
+ lex->orig_sql_command == SQLCOM_END)) // not a 'show' command
{
my_error(ER_UNKNOWN_TABLE, MYF(0),
ptr->real_name, information_schema_name.str);
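Two changes meet in the sql_parse.cc hunks: COM_FIELD_LIST learns to resolve INFORMATION_SCHEMA tables, and the new BIT(n) column type gets its length handling, defaulting to 1 bit and capped at MAX_BIT_FIELD_LENGTH (64, per the unireg.h hunk at the end). The footprint computation is a plain ceiling division:

    /* Bytes occupied by an n-bit BIT column body, mirroring
       new_field->pack_length= (new_field->length + 7) / 8 above. */
    static unsigned bit_pack_length(unsigned n_bits)
    {
      return (n_bits + 7) / 8;    /* BIT(1) -> 1, BIT(9) -> 2, BIT(64) -> 8 */
    }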
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 6d59d465445..8afefe3cae8 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2041,10 +2041,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt,
thd->cleanup_after_query();
if (stmt->state == Item_arena::PREPARED)
- {
- thd->current_arena= thd;
stmt->state= Item_arena::EXECUTED;
- }
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ed3606856a0..570774c8054 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -64,6 +64,7 @@ static void best_extension_by_limited_search(JOIN *join,
uint prune_level);
static uint determine_search_depth(JOIN* join);
static int join_tab_cmp(const void* ptr1, const void* ptr2);
+static int join_tab_cmp_straight(const void* ptr1, const void* ptr2);
/*
TODO: 'find_best' is here only temporarily until 'greedy_search' is
tested and approved.
@@ -235,9 +236,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result)
res|= thd->net.report_error;
if (unlikely(res))
{
- /*
- If we have real error reported erly then this will be ignored
- */
+ /* If we had another error reported earlier then this will be ignored */
result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
result->abort();
}
@@ -3680,22 +3679,26 @@ choose_plan(JOIN *join, table_map join_tables)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
uint prune_level= join->thd->variables.optimizer_prune_level;
-
+ bool straight_join= join->select_options & SELECT_STRAIGHT_JOIN;
DBUG_ENTER("choose_plan");
- if (join->select_options & SELECT_STRAIGHT_JOIN)
+ /*
+ if (SELECT_STRAIGHT_JOIN option is set)
+ reorder tables so dependent tables come after tables they depend
+ on, otherwise keep tables in the order they were specified in the query
+ else
+ Apply heuristic: pre-sort all access plans with respect to the number of
+ records accessed.
+ */
+ qsort(join->best_ref + join->const_tables, join->tables - join->const_tables,
+ sizeof(JOIN_TAB*), straight_join?join_tab_cmp_straight:join_tab_cmp);
+
+ if (straight_join)
{
optimize_straight_join(join, join_tables);
}
else
{
- /*
- Heuristic: pre-sort all access plans with respect to the number of
- records accessed.
- */
- qsort(join->best_ref + join->const_tables, join->tables - join->const_tables,
- sizeof(JOIN_TAB*), join_tab_cmp);
-
if (search_depth == MAX_TABLES+2)
{ /*
TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before
@@ -3752,6 +3755,23 @@ join_tab_cmp(const void* ptr1, const void* ptr2)
}
+/*
+ Same as join_tab_cmp, but for use with SELECT_STRAIGHT_JOIN.
+*/
+
+static int
+join_tab_cmp_straight(const void* ptr1, const void* ptr2)
+{
+ JOIN_TAB *jt1= *(JOIN_TAB**) ptr1;
+ JOIN_TAB *jt2= *(JOIN_TAB**) ptr2;
+
+ if (jt1->dependent & jt2->table->map)
+ return 1;
+ if (jt2->dependent & jt1->table->map)
+ return -1;
+ return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0);
+}
+
/*
Heuristic procedure to automatically guess a reasonable degree of
exhaustiveness for the greedy search procedure.
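choose_plan now always pre-sorts the non-constant tables and only the comparator varies: join_tab_cmp orders by the optimizer's record estimates, while join_tab_cmp_straight enforces nothing but dependencies and otherwise compares the JOIN_TAB pointers themselves. Since the tabs live in one array, address order is declaration order, which preserves STRAIGHT_JOIN semantics under a non-stable qsort. A minimal model over a toy struct (illustrative only):

    struct tab { unsigned long dependent; unsigned long map; };

    /* Dependent tables sort after the tables they depend on;
       everything else keeps its original (address) order. */
    static int cmp_straight(const void *p1, const void *p2)
    {
      const struct tab *t1= *(const struct tab *const *) p1;
      const struct tab *t2= *(const struct tab *const *) p2;
      if (t1->dependent & t2->map) return 1;
      if (t2->dependent & t1->map) return -1;
      return t1 > t2 ? 1 : t1 < t2 ? -1 : 0;
    }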
@@ -3834,7 +3854,7 @@ optimize_straight_join(JOIN *join, table_map join_tables)
uint idx= join->const_tables;
double record_count= 1.0;
double read_time= 0.0;
-
+
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
{
/* Find the best access method from 's' to the current partial plan */
@@ -4252,7 +4272,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
memcpy((gptr) join->best_positions,(gptr) join->positions,
sizeof(POSITION)*idx);
- join->best_read=read_time;
+ join->best_read= read_time - 0.001;
}
return;
}
@@ -4873,7 +4893,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
&keyinfo->key_part[i],
(char*) key_buff,maybe_null);
/*
- Remeber if we are going to use REF_OR_NULL
+ Remember if we are going to use REF_OR_NULL
But only if field _really_ can be null i.e. we force JT_REF
instead of JT_REF_OR_NULL in case if field can't be null
*/
@@ -7538,7 +7558,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field,
{
Field *new_field;
- if (convert_blob_length && org_field->flags & BLOB_FLAG)
+ if (convert_blob_length && (org_field->flags & BLOB_FLAG))
new_field= new Field_varstring(convert_blob_length,
org_field->maybe_null(),
org_field->field_name, table,
@@ -7777,6 +7797,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
KEY_PART_INFO *key_part_info;
Item **copy_func;
MI_COLUMNDEF *recinfo;
+ uint total_uneven_bit_length= 0;
DBUG_ENTER("create_tmp_table");
DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
(int) distinct, (int) save_sum_fields,
@@ -7805,7 +7826,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
else for (ORDER *tmp=group ; tmp ; tmp=tmp->next)
{
(*tmp->item)->marker=4; // Store null in key
- if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH)
+ if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB)
using_unique_constraint=1;
}
if (param->group_length >= MAX_BLOB_WIDTH)
@@ -7966,6 +7987,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
reclength+=new_field->pack_length();
if (!(new_field->flags & NOT_NULL_FLAG))
null_count++;
+ if (new_field->type() == FIELD_TYPE_BIT)
+ total_uneven_bit_length+= new_field->field_length & 7;
if (new_field->flags & BLOB_FLAG)
{
*blob_field++= new_field;
@@ -8014,7 +8037,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
null_count++;
}
hidden_null_pack_length=(hidden_null_count+7)/8;
- null_pack_length=hidden_null_count+(null_count+7)/8;
+ null_pack_length= hidden_null_count +
+ (null_count + total_uneven_bit_length + 7) / 8;
reclength+=null_pack_length;
if (!reclength)
reclength=1; // Dummy select
@@ -8147,37 +8171,40 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
key_part_info->null_bit=0;
key_part_info->field= field;
key_part_info->offset= field->offset();
- key_part_info->length= (uint16) field->pack_length();
+ key_part_info->length= (uint16) field->key_length();
key_part_info->type= (uint8) field->key_type();
key_part_info->key_type =
((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ?
+ (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
+ (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
0 : FIELDFLAG_BINARY;
if (!using_unique_constraint)
{
group->buff=(char*) group_buff;
- if (!(group->field=field->new_field(thd->mem_root,table)))
+ if (!(group->field= field->new_key_field(thd->mem_root,table,
+ (char*) group_buff +
+ test(maybe_null),
+ field->null_ptr,
+ field->null_bit)))
goto err; /* purecov: inspected */
if (maybe_null)
{
/*
- To be able to group on NULL, we reserve place in group_buff
- for the NULL flag just before the column.
+ To be able to group on NULL, we reserved place in group_buff
+ for the NULL flag just before the column. (see above).
The field data is after this flag.
- The NULL flag is updated by 'end_update()' and 'end_write()'
+ The NULL flag is updated in 'end_update()' and 'end_write()'
*/
keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL
key_part_info->null_bit=field->null_bit;
key_part_info->null_offset= (uint) (field->null_ptr -
(uchar*) table->record[0]);
- group->field->move_field((char*) ++group->buff);
- group_buff++;
+ group->buff++; // Pointer to field data
+ group_buff++; // Skip null flag
}
- else
- group->field->move_field((char*) group_buff);
/* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */
key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL;
- group_buff+= key_part_info->length;
+ group_buff+= group->field->pack_length();
}
keyinfo->key_length+= key_part_info->length;
}
@@ -8241,7 +8268,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
key_part_info->type= (uint8) (*reg_field)->key_type();
key_part_info->key_type =
((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ?
+ (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
+ (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
0 : FIELDFLAG_BINARY;
}
}
@@ -8291,8 +8319,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
MI_KEYDEF keydef;
MI_UNIQUEDEF uniquedef;
KEY *keyinfo=param->keyinfo;
-
DBUG_ENTER("create_myisam_tmp_table");
+
if (table->keys)
{ // Get keys for ni_create
bool using_unique_constraint=0;
@@ -8340,19 +8368,18 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
{
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
- HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT);
- seg->bit_start=seg->length - table->blob_ptr_size;
+ HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
+ seg->bit_start= field->pack_length() - table->blob_ptr_size;
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
else
{
- seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
- HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT);
+ seg->type= keyinfo->key_part[i].type;
/* Tell handler if it can do suffix space compression */
if (field->real_type() == MYSQL_TYPE_STRING &&
keyinfo->key_part[i].length > 4)
- seg->flag|=HA_SPACE_PACK;
+ seg->flag|= HA_SPACE_PACK;
}
if (!(field->flags & NOT_NULL_FLAG))
{
@@ -8361,7 +8388,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
/*
We are using a GROUP BY on something that contains NULL
In this case we have to tell MyISAM that two NULL should
- on INSERT be compared as equal
+ on INSERT be regarded as the same value
*/
if (!using_unique_constraint)
keydef.flag|= HA_NULL_ARE_EQUAL;
@@ -8645,21 +8672,19 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
}
if (table)
{
- int tmp;
+ int tmp, new_errno= 0;
if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE)))
{
DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed"));
- my_errno= tmp;
- error= -1;
+ new_errno= tmp;
}
if ((tmp=table->file->ha_index_or_rnd_end()))
{
DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
- my_errno= tmp;
- error= -1;
+ new_errno= tmp;
}
- if (error == -1)
- table->file->print_error(my_errno,MYF(0));
+ if (new_errno)
+ table->file->print_error(new_errno,MYF(0));
}
#ifndef DBUG_OFF
if (error)
@@ -9831,13 +9856,19 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(0);
}
- /* The null bits are already set */
+ /*
+ Copy null bits from group key to table
+ We can't copy all data as the key may have a different format
+ from the row data (for example, with VARCHAR keys)
+ */
KEY_PART_INFO *key_part;
for (group=table->group,key_part=table->key_info[0].key_part;
group ;
group=group->next,key_part++)
- memcpy(table->record[0]+key_part->offset, group->buff, key_part->length);
-
+ {
+ if (key_part->null_bit)
+ memcpy(table->record[0]+key_part->offset, group->buff, 1);
+ }
init_tmptable_sum_functions(join->sum_funcs);
copy_funcs(join->tmp_table_param.items_to_copy);
if ((error=table->file->write_row(table->record[0])))
@@ -11647,15 +11678,30 @@ calc_group_buffer(JOIN *join,ORDER *group)
{
if (field->type() == FIELD_TYPE_BLOB)
key_length+=MAX_BLOB_WIDTH; // Can't be used as a key
+ else if (field->type() == MYSQL_TYPE_VARCHAR)
+ key_length+= field->field_length + HA_KEY_BLOB_LENGTH;
else
- key_length+=field->pack_length();
+ key_length+= field->pack_length();
}
else if ((*group->item)->result_type() == REAL_RESULT)
key_length+=sizeof(double);
else if ((*group->item)->result_type() == INT_RESULT)
key_length+=sizeof(longlong);
+ else if ((*group->item)->result_type() == STRING_RESULT)
+ {
+ /*
+ Group strings are taken as varstrings and require a length field.
+ A field is not yet created by create_tmp_field()
+ and the sizes should match up.
+ */
+ key_length+= (*group->item)->max_length + HA_KEY_BLOB_LENGTH;
+ }
else
- key_length+=(*group->item)->max_length;
+ {
+ /* This case should never be chosen */
+ DBUG_ASSERT(0);
+ current_thd->fatal_error();
+ }
parts++;
if ((*group->item)->maybe_null)
null_parts++;
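The calc_group_buffer hunk sizes each GROUP BY key part to match the field that create_tmp_table will build for it: VARCHAR columns and string-valued expressions are stored as varstrings, so they reserve their maximum length plus HA_KEY_BLOB_LENGTH for the length prefix, and an item whose result type is neither numeric nor string now trips an assertion instead of silently using max_length. The rules after this hunk, summarized:

    BLOB field                  MAX_BLOB_WIDTH (cannot be a key; forces unique constraint)
    VARCHAR field               field_length + HA_KEY_BLOB_LENGTH
    other field                 pack_length()
    REAL-valued expression      sizeof(double)
    INT-valued expression       sizeof(longlong)
    STRING-valued expression    max_length + HA_KEY_BLOB_LENGTH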
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 5e42fc0ee30..0f26207b391 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -420,15 +420,15 @@ class store_key :public Sql_alloc
:null_ptr(null),err(0)
{
if (field_arg->type() == FIELD_TYPE_BLOB)
- to_field=new Field_varstring(ptr, length, (uchar*) null, 1,
+ {
+ /* Key segments are always packed with a 2 byte length prefix */
+ to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1,
Field::NONE, field_arg->field_name,
field_arg->table, field_arg->charset());
- else
- {
- to_field=field_arg->new_field(thd->mem_root,field_arg->table);
- if (to_field)
- to_field->move_field(ptr, (uchar*) null, 1);
}
+ else
+ to_field=field_arg->new_key_field(thd->mem_root, field_arg->table,
+ ptr, (uchar*) null, 1);
}
virtual ~store_key() {} /* Not actually needed */
virtual bool copy()=0;
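store_key construction changes because key segments for variable-length columns are always packed with a 2-byte length prefix: BLOB columns get an explicit Field_varstring whose new third constructor argument is the number of length bytes, and everything else goes through the new Field::new_key_field() instead of new_field() plus move_field(). The segment layout this implies, assuming the server's usual low-byte-first key encoding (an assumption, not quoted code):

    /* [ len_lo | len_hi | data[0] ... data[len-1] | pad to segment size ] */
    static unsigned segment_data_len(const unsigned char *seg)
    {
      return (unsigned) seg[0] | ((unsigned) seg[1] << 8);  /* uint2korr() */
    }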
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index e50c68dd289..826bd2038f9 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1976,6 +1976,8 @@ int schema_tables_add(THD *thd, List<char> *files, const char *wild)
ST_SCHEMA_TABLE *tmp_schema_table= schema_tables;
for ( ; tmp_schema_table->table_name; tmp_schema_table++)
{
+ if (tmp_schema_table->hidden)
+ continue;
if (wild)
{
if (lower_case_table_names)
@@ -2374,12 +2376,24 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
const char *file_name)
{
TIME time;
- const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
+ LEX *lex= thd->lex;
+ const char *wild= lex->wild ? lex->wild->ptr() : NullS;
CHARSET_INFO *cs= system_charset_info;
DBUG_ENTER("get_schema_column_record");
if (res)
{
- DBUG_RETURN(1);
+ if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS)
+ {
+ /*
+ I.e. we are in SELECT FROM INFORMATION_SCHEMA.COLUMNS
+ rather than in SHOW COLUMNS
+ */
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ res= 0;
+ }
+ DBUG_RETURN(res);
}
TABLE *show_table= tables->table;
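This is the first of several sql_show.cc hunks applying one policy: when an INFORMATION_SCHEMA table is filled from an ordinary SELECT, a failure on one underlying table is pushed as a warning and the scan continues, whereas the equivalent SHOW command (orig_sql_command still set) keeps the hard error. So SELECT * FROM INFORMATION_SCHEMA.COLUMNS over a database containing a broken view returns rows plus warnings instead of aborting. The downgrade itself is three lines, repeated in each record callback below:

    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                 thd->net.last_errno, thd->net.last_error);
    thd->clear_error();
    res= 0;                             /* tell the caller to keep going */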
@@ -2745,7 +2759,23 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
{
CHARSET_INFO *cs= system_charset_info;
DBUG_ENTER("get_schema_stat_record");
- if (!res && !tables->view)
+ if (res)
+ {
+ if (thd->lex->orig_sql_command != SQLCOM_SHOW_KEYS)
+ {
+ /*
+ I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS
+ rather than in SHOW KEYS
+ */
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ res= 0;
+ }
+ DBUG_RETURN(res);
+ }
+ else if (!tables->view)
{
TABLE *show_table= tables->table;
KEY *key_info=show_table->key_info;
@@ -2843,7 +2873,14 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables,
table->file->write_row(table->record[0]);
}
}
- DBUG_RETURN(res);
+ else
+ {
+ if (tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ }
+ DBUG_RETURN(0);
}
@@ -2868,7 +2905,15 @@ static int get_schema_constraints_record(THD *thd, struct st_table_list *tables,
const char *file_name)
{
DBUG_ENTER("get_schema_constraints_record");
- if (!res && !tables->view)
+ if (res)
+ {
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ else if (!tables->view)
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
@@ -2925,7 +2970,15 @@ static int get_schema_key_column_usage_record(THD *thd,
{
DBUG_ENTER("get_schema_key_column_usage_record");
CHARSET_INFO *cs= system_charset_info;
- if (!res && !tables->view)
+ if (res)
+ {
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ else if (!tables->view)
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
@@ -3338,7 +3391,7 @@ int make_schema_select(THD *thd, SELECT_LEX *sel,
/*
- Fill temporaty schema tables before SELECT
+ Fill temporary schema tables before SELECT
SYNOPSIS
get_schema_tables_result()
@@ -3637,38 +3690,38 @@ ST_FIELD_INFO table_names_fields_info[]=
ST_SCHEMA_TABLE schema_tables[]=
{
{"SCHEMATA", schema_fields_info, create_schema_table,
- fill_schema_shemata, make_schemata_old_format, 0, 1, -1},
+ fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0},
{"TABLES", tables_fields_info, create_schema_table,
- get_all_tables, make_old_format, get_schema_tables_record, 1, 2},
+ get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0},
{"COLUMNS", columns_fields_info, create_schema_table,
- get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2},
+ get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0},
{"CHARACTER_SETS", charsets_fields_info, create_schema_table,
- fill_schema_charsets, make_character_sets_old_format, 0, -1, -1},
+ fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0},
{"COLLATIONS", collation_fields_info, create_schema_table,
- fill_schema_collation, make_old_format, 0, -1, -1},
+ fill_schema_collation, make_old_format, 0, -1, -1, 0},
{"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info,
- create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1},
+ create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0},
{"ROUTINES", proc_fields_info, create_schema_table,
- fill_schema_proc, make_proc_old_format, 0, -1, -1},
+ fill_schema_proc, make_proc_old_format, 0, -1, -1, 0},
{"STATISTICS", stat_fields_info, create_schema_table,
- get_all_tables, make_old_format, get_schema_stat_record, 1, 2},
+ get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0},
{"VIEWS", view_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_views_record, 1, 2},
+ get_all_tables, 0, get_schema_views_record, 1, 2, 0},
{"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table,
- fill_schema_user_privileges, 0, 0, -1, -1},
+ fill_schema_user_privileges, 0, 0, -1, -1, 0},
{"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table,
- fill_schema_schema_privileges, 0, 0, -1, -1},
+ fill_schema_schema_privileges, 0, 0, -1, -1, 0},
{"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
- fill_schema_table_privileges, 0, 0, -1, -1},
+ fill_schema_table_privileges, 0, 0, -1, -1, 0},
{"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table,
- fill_schema_column_privileges, 0, 0, -1, -1},
+ fill_schema_column_privileges, 0, 0, -1, -1, 0},
{"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_constraints_record, 3, 4},
+ get_all_tables, 0, get_schema_constraints_record, 3, 4, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_key_column_usage_record, 4, 5},
+ get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0},
{"TABLE_NAMES", table_names_fields_info, create_schema_table,
- get_all_tables, make_table_names_old_format, 0, 1, 2},
- {0, 0, 0, 0, 0, 0, 0, 0}
+ get_all_tables, make_table_names_old_format, 0, 1, 2, 1},
+ {0, 0, 0, 0, 0, 0, 0, 0, 0}
};
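The trailing 0 or 1 added to every schema_tables initializer is the new ST_SCHEMA_TABLE::hidden flag declared in the table.h hunk below. TABLE_NAMES is the one table marked hidden: it keeps backing SHOW TABLES internally, but schema_tables_add() now skips it in listings, and add_table_to_list() (in the sql_parse.cc hunk earlier) rejects it unless a SHOW command set lex->orig_sql_command. In effect, SELECT * FROM INFORMATION_SCHEMA.TABLE_NAMES now fails with ER_UNKNOWN_TABLE while SHOW TABLES behaves as before.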
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 56605d1c6e0..6629122a1fa 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -458,6 +458,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
int field_no,dup_no;
int select_field_pos,auto_increment=0;
List_iterator<create_field> it(fields),it2(fields);
+ uint total_uneven_bit_length= 0;
DBUG_ENTER("mysql_prepare_table");
select_field_pos=fields.elements - select_field_count;
@@ -508,7 +509,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
String conv, *tmp;
for (uint i= 0; (tmp= it++); i++)
{
- if (String::needs_conversion(tmp->length(), tmp->charset(), cs, &dummy))
+ if (String::needs_conversion(tmp->length(), tmp->charset(), cs,
+ &dummy))
{
uint cnv_errs;
conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
@@ -614,6 +616,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (!(sql_field->flags & NOT_NULL_FLAG))
null_fields++;
+ if (sql_field->sql_type == FIELD_TYPE_BIT)
+ total_uneven_bit_length+= sql_field->length & 7;
+
if (check_column_name(sql_field->field_name))
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0), sql_field->field_name);
@@ -666,7 +671,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
/* If fixed row records, we need one bit to check for deleted rows */
if (!(db_options & HA_OPTION_PACK_RECORD))
null_fields++;
- pos=(null_fields+7)/8;
+ pos= (null_fields + total_uneven_bit_length + 7) / 8;
it.rewind();
while ((sql_field=it++))
@@ -762,6 +767,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
case FIELD_TYPE_NULL:
sql_field->pack_flag=f_settype((uint) sql_field->sql_type);
break;
+ case FIELD_TYPE_BIT:
+ if (!(file->table_flags() & HA_CAN_BIT_FIELD))
+ {
+ my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "BIT FIELD");
+ DBUG_RETURN(-1);
+ }
+ sql_field->pack_flag= FIELDFLAG_NUMBER;
+ break;
case FIELD_TYPE_TIMESTAMP:
/* We should replace old TIMESTAMP fields with their newer analogs */
if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD)
@@ -3686,7 +3699,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
free_io_cache(from);
delete [] copy; // This is never 0
- if (to->file->end_bulk_insert() && !error)
+ if (to->file->end_bulk_insert() && error <= 0)
{
to->file->print_error(my_errno,MYF(0));
error=1;
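mysql_prepare_table now folds BIT columns into the null-bitmap prefix: the bits of a BIT(n) column that do not fill a whole byte (n & 7 of them) are stored next to the null bits, so the prefix grows to (null_fields + total_uneven_bit_length + 7) / 8 bytes. A worked example with made-up columns: three nullable columns plus BIT(9) and BIT(3) give total_uneven_bit_length = (9 & 7) + (3 & 7) = 1 + 3 = 4, hence (3 + 4 + 7) / 8 = 1 prefix byte, with the BIT(9) column's remaining full byte living in the record body.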
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index a1a821e9955..99b8caed784 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -208,6 +208,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token BACKUP_SYM
%token BERKELEY_DB_SYM
%token BINARY
+%token BIN_NUM
%token BIT_SYM
%token BOOL_SYM
%token BOOLEAN_SYM
@@ -664,7 +665,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
- sp_opt_label
+ sp_opt_label BIN_NUM
%type <lex_str_ptr>
opt_table_alias
@@ -2750,8 +2751,10 @@ type:
int_type opt_len field_options { $$=$1; }
| real_type opt_precision field_options { $$=$1; }
| FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; }
- | BIT_SYM opt_len { Lex->length=(char*) "1";
- $$=FIELD_TYPE_TINY; }
+ | BIT_SYM { Lex->length= (char*) "1";
+ $$=FIELD_TYPE_BIT; }
+ | BIT_SYM '(' NUM ')' { Lex->length= $3.str;
+ $$=FIELD_TYPE_BIT; }
| BOOL_SYM { Lex->length=(char*) "1";
$$=FIELD_TYPE_TINY; }
| BOOLEAN_SYM { Lex->length=(char*) "1";
@@ -6458,15 +6461,25 @@ text_string:
{ $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); }
| HEX_NUM
{
- Item *tmp = new Item_varbinary($1.str,$1.length);
+ Item *tmp= new Item_hex_string($1.str, $1.length);
/*
- it is OK only emulate fix_fieds, because we need only
+ it is OK only emulate fix_fields, because we need only
value of constant
*/
$$= tmp ?
tmp->quick_fix_field(), tmp->val_str((String*) 0) :
(String*) 0;
}
+ | BIN_NUM
+ {
+ Item *tmp= new Item_bin_string($1.str, $1.length);
+ /*
+ it is OK only emulate fix_fields, because we need only
+ value of constant
+ */
+ $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ }
;
param_marker:
@@ -6508,10 +6521,11 @@ literal:
| NUM_literal { $$ = $1; }
| NULL_SYM { $$ = new Item_null();
Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;}
- | HEX_NUM { $$ = new Item_varbinary($1.str,$1.length);}
+ | HEX_NUM { $$ = new Item_hex_string($1.str, $1.length);}
+ | BIN_NUM { $$= new Item_bin_string($1.str, $1.length); }
| UNDERSCORE_CHARSET HEX_NUM
{
- Item *tmp= new Item_varbinary($2.str,$2.length);
+ Item *tmp= new Item_hex_string($2.str, $2.length);
/*
it is OK only emulate fix_fields, because we need only
value of constant
@@ -6523,6 +6537,20 @@ literal:
str ? str->length() : 0,
Lex->charset);
}
+ | UNDERSCORE_CHARSET BIN_NUM
+ {
+ Item *tmp= new Item_bin_string($2.str, $2.length);
+ /*
+ it is OK only emulate fix_fields, because we need only
+ value of constant
+ */
+ String *str= tmp ?
+ tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ $$= new Item_string(str ? str->ptr() : "",
+ str ? str->length() : 0,
+ Lex->charset);
+ }
| DATE_SYM text_literal { $$ = $2; }
| TIME_SYM text_literal { $$ = $2; }
| TIMESTAMP text_literal { $$ = $2; };
@@ -6857,6 +6885,7 @@ keyword:
| CLIENT_SYM {}
| CLOSE_SYM {}
| COLLATION_SYM {}
+ | COLUMNS {}
| COMMENT_SYM {}
| COMMITTED_SYM {}
| COMMIT_SYM {}
@@ -6978,6 +7007,7 @@ keyword:
| POLYGON {}
| PREPARE_SYM {}
| PREV_SYM {}
+ | PRIVILEGES {}
| PROCESS {}
| PROCESSLIST_SYM {}
| QUARTER_SYM {}
@@ -7029,6 +7059,7 @@ keyword:
| SUBDATE_SYM {}
| SUBJECT_SYM {}
| SUPER_SYM {}
+ | TABLES {}
| TABLESPACE {}
| TEMPORARY {}
| TEMPTABLE_SYM {}
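The grammar picks up the BIN_NUM token everywhere HEX_NUM was accepted (string literals, plain literals, and the _charset-introduced form), gives BIT its own field type with an optional length, and moves COLUMNS, PRIVILEGES and TABLES into the keyword rule, making them non-reserved so that, for example, a table or column named tables parses again. The _charset BIN_NUM action mirrors the HEX_NUM one: evaluate the binary string to its value, then rebuild an Item_string in the introducer's character set.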
diff --git a/sql/structs.h b/sql/structs.h
index 5d0c7bc4f1f..0b59c3abeb3 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -74,7 +74,7 @@ typedef struct st_key_part_info { /* Info about a key part */
uint16 store_length;
uint16 key_type;
uint16 fieldnr; /* Fieldnum in UNIREG */
- uint8 key_part_flag; /* 0 or HA_REVERSE_SORT */
+ uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */
uint8 type;
uint8 null_bit; /* Position to null_bit */
} KEY_PART_INFO ;
diff --git a/sql/table.cc b/sql/table.cc
index b4a07448b14..c18a2557337 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -81,7 +81,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
KEY *keyinfo;
KEY_PART_INFO *key_part;
uchar *null_pos;
- uint null_bit, new_frm_ver, field_pack_length;
+ uint null_bit_pos, new_frm_ver, field_pack_length;
SQL_CRYPT *crypted=0;
MEM_ROOT **root_ptr, *old_root;
DBUG_ENTER("openfrm");
@@ -409,15 +409,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (null_field_first)
{
outparam->null_flags=null_pos=(uchar*) record+1;
- null_bit= (db_create_options & HA_OPTION_PACK_RECORD) ? 1 : 2;
- outparam->null_bytes=(outparam->null_fields+null_bit+6)/8;
+ null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 0 : 1;
+ outparam->null_bytes= (outparam->null_fields + null_bit_pos + 7) / 8;
}
else
{
outparam->null_bytes=(outparam->null_fields+7)/8;
outparam->null_flags=null_pos=
(uchar*) (record+1+outparam->reclength-outparam->null_bytes);
- null_bit=1;
+ null_bit_pos= 0;
}
use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH;
@@ -512,7 +512,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
*field_ptr=reg_field=
make_field(record+recpos,
(uint32) field_length,
- null_pos,null_bit,
+ null_pos, null_bit_pos,
pack_flag,
field_type,
charset,
@@ -529,14 +529,19 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
goto err_not_open; /* purecov: inspected */
}
reg_field->comment=comment;
- if (!(reg_field->flags & NOT_NULL_FLAG))
+ if (field_type == FIELD_TYPE_BIT)
{
- if ((null_bit<<=1) == 256)
+ if ((null_bit_pos+= field_length & 7) > 7)
{
- null_pos++;
- null_bit=1;
+ null_pos++;
+ null_bit_pos-= 8;
}
}
+ if (!(reg_field->flags & NOT_NULL_FLAG))
+ {
+ if (!(null_bit_pos= (null_bit_pos + 1) & 7))
+ null_pos++;
+ }
if (f_no_default(pack_flag))
reg_field->flags|= NO_DEFAULT_VALUE_FLAG;
if (reg_field->unireg_check == Field::NEXT_NUMBER)
@@ -626,6 +631,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (!(field->flags & BINARY_FLAG))
keyinfo->flags|= HA_END_SPACE_KEY;
}
+ if (field->type() == MYSQL_TYPE_BIT)
+ key_part->key_part_flag|= HA_BIT_PART;
+
if (i == 0 && key != primary_key)
field->flags |=
((keyinfo->flags & HA_NOSAME) &&
@@ -1845,8 +1853,9 @@ ok:
DBUG_RETURN(0);
err:
- /* Hide "Unknown column" error */
- if (thd->net.last_errno == ER_BAD_FIELD_ERROR)
+ /* Hide "Unknown column" or "Unknown function" error */
+ if (thd->net.last_errno == ER_BAD_FIELD_ERROR ||
+ thd->net.last_errno == ER_SP_DOES_NOT_EXIST)
{
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0), view_db.str, view_name.str);
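The openfrm changes replace the shifting null-bit mask (1, 2, 4, ... 256) with a plain bit position 0 to 7 plus an explicit wrap, which is what lets BIT fields interleave with null flags: a BIT field first claims its field_length & 7 uneven bits, then a nullable field claims one more bit, and the byte pointer advances whenever the position wraps. The unireg.cc hunk below switches make_empty_rec() to the same position form. A standalone model of the walk (hypothetical helper):

    /* Advance a (byte, bit) cursor through the null/bit prefix.
       Callers here pass nbits <= 8, so a single wrap suffices. */
    static void claim_bits(unsigned char **byte, unsigned *bit, unsigned nbits)
    {
      *bit+= nbits;
      if (*bit > 7)
      {
        (*byte)++;
        *bit-= 8;
      }
    }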
diff --git a/sql/table.h b/sql/table.h
index f5f2a76c6f1..a804376ee3c 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -254,6 +254,7 @@ typedef struct st_schema_table
TABLE *table, bool res, const char *base_name,
const char *file_name);
int idx_field1, idx_field2;
+ bool hidden;
} ST_SCHEMA_TABLE;
diff --git a/sql/time.cc b/sql/time.cc
index 562f9956ccc..f1d21915c23 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -263,95 +263,6 @@ str_to_time_with_warn(const char *str, uint length, TIME *l_time)
/*
- Convert datetime value specified as number to broken-down TIME
- representation and form value of DATETIME type as side-effect.
-
- SYNOPSIS
- number_to_TIME()
- nr - datetime value as number
- time_res - pointer for structure for broken-down representation
- fuzzy_date - indicates whenever we allow fuzzy dates
- was_cut - set ot 1 if there was some kind of error during
- conversion or to 0 if everything was OK.
-
- DESCRIPTION
- Convert a datetime value of formats YYMMDD, YYYYMMDD, YYMMDDHHMSS,
- YYYYMMDDHHMMSS to broken-down TIME representation. Return value in
- YYYYMMDDHHMMSS format as side-effect.
-
- This function also checks if datetime value fits in DATETIME range.
-
- RETURN VALUE
- Datetime value in YYYYMMDDHHMMSS format.
- If input value is not valid datetime value then 0 is returned.
-*/
-
-longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date,
- int *was_cut)
-{
- long part1,part2;
-
- *was_cut= 0;
-
- if (nr == LL(0) || nr >= LL(10000101000000))
- goto ok;
- if (nr < 101)
- goto err;
- if (nr <= (YY_PART_YEAR-1)*10000L+1231L)
- {
- nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069
- goto ok;
- }
- if (nr < (YY_PART_YEAR)*10000L+101L)
- goto err;
- if (nr <= 991231L)
- {
- nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999
- goto ok;
- }
- if (nr < 10000101L)
- goto err;
- if (nr <= 99991231L)
- {
- nr= nr*1000000L;
- goto ok;
- }
- if (nr < 101000000L)
- goto err;
- if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959))
- {
- nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069
- goto ok;
- }
- if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000))
- goto err;
- if (nr <= LL(991231235959))
- nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999
-
- ok:
- part1=(long) (nr/LL(1000000));
- part2=(long) (nr - (longlong) part1*LL(1000000));
- time_res->year= (int) (part1/10000L); part1%=10000L;
- time_res->month= (int) part1 / 100;
- time_res->day= (int) part1 % 100;
- time_res->hour= (int) (part2/10000L); part2%=10000L;
- time_res->minute=(int) part2 / 100;
- time_res->second=(int) part2 % 100;
-
- if (time_res->year <= 9999 && time_res->month <= 12 &&
- time_res->day <= 31 && time_res->hour <= 23 &&
- time_res->minute <= 59 && time_res->second <= 59 &&
- (fuzzy_date || (time_res->month != 0 && time_res->day != 0) || nr==0))
- return nr;
-
- err:
-
- *was_cut= 1;
- return LL(0);
-}
-
-
-/*
Convert a system time structure to TIME
*/
@@ -807,77 +718,4 @@ void make_truncated_value_warning(THD *thd, const char *str_val,
}
-/* Convert time value to integer in YYYYMMDDHHMMSS format */
-
-ulonglong TIME_to_ulonglong_datetime(const TIME *time)
-{
- return ((ulonglong) (time->year * 10000UL +
- time->month * 100UL +
- time->day) * ULL(1000000) +
- (ulonglong) (time->hour * 10000UL +
- time->minute * 100UL +
- time->second));
-}
-
-
-/* Convert TIME value to integer in YYYYMMDD format */
-
-ulonglong TIME_to_ulonglong_date(const TIME *time)
-{
- return (ulonglong) (time->year * 10000UL + time->month * 100UL + time->day);
-}
-
-
-/*
- Convert TIME value to integer in HHMMSS format.
- This function doesn't take into account time->day member:
- it's assumed that days have been converted to hours already.
-*/
-
-ulonglong TIME_to_ulonglong_time(const TIME *time)
-{
- return (ulonglong) (time->hour * 10000UL +
- time->minute * 100UL +
- time->second);
-}
-
-
-/*
- Convert struct TIME (date and time split into year/month/day/hour/...
- to a number in format YYYYMMDDHHMMSS (DATETIME),
- YYYYMMDD (DATE) or HHMMSS (TIME).
-
- SYNOPSIS
- TIME_to_ulonglong()
-
- DESCRIPTION
- The function is used when we need to convert value of time item
- to a number if it's used in numeric context, i. e.:
- SELECT NOW()+1, CURDATE()+0, CURTIMIE()+0;
- SELECT ?+1;
-
- NOTE
- This function doesn't check that given TIME structure members are
- in valid range. If they are not, return value won't reflect any
- valid date either.
-*/
-
-ulonglong TIME_to_ulonglong(const TIME *time)
-{
- switch (time->time_type) {
- case MYSQL_TIMESTAMP_DATETIME:
- return TIME_to_ulonglong_datetime(time);
- case MYSQL_TIMESTAMP_DATE:
- return TIME_to_ulonglong_date(time);
- case MYSQL_TIMESTAMP_TIME:
- return TIME_to_ulonglong_time(time);
- case MYSQL_TIMESTAMP_NONE:
- case MYSQL_TIMESTAMP_ERROR:
- return ULL(0);
- default:
- DBUG_ASSERT(0);
- }
- return 0;
-}
-
#endif
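The block deleted here removes the numeric datetime conversions (number_to_TIME and the TIME_to_ulonglong family) from sql/time.cc. The diffstat, limited to sql/, shows no callers being updated, which is consistent with these helpers having moved to code shared between client and server rather than being dropped outright; treat that as an inference from this hunk, not something the patch states.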
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 636156940a4..dbd3da58a33 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -652,7 +652,7 @@ static bool make_empty_rec(File file,enum db_type table_type,
Field *regfield=make_field((char*) buff+field->offset,field->length,
field->flags & NOT_NULL_FLAG ? 0:
null_pos+null_count/8,
- 1 << (null_count & 7),
+ null_count & 7,
field->pack_flag,
field->sql_type,
field->charset,
diff --git a/sql/unireg.h b/sql/unireg.h
index 31b28da2423..932bdf4dfc5 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -66,6 +66,8 @@
/* Max column width +1 */
#define MAX_FIELD_WIDTH (MAX_FIELD_CHARLENGTH*MAX_MBWIDTH+1)
+#define MAX_BIT_FIELD_LENGTH 64 /* Max length in bits for bit fields */
+
#define MAX_DATE_WIDTH 10 /* YYYY-MM-DD */
#define MAX_TIME_WIDTH 23 /* -DDDDDD HH:MM:SS.###### */
#define MAX_DATETIME_FULL_WIDTH 29 /* YYYY-MM-DD HH:MM:SS.###### AM */