summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorunknown <dlenev@mockturtle.local>2006-10-02 22:53:10 +0400
committerunknown <dlenev@mockturtle.local>2006-10-02 22:53:10 +0400
commit2e2198633e71cfb77dc95f17bfe8f6bb88d63424 (patch)
tree5bb148bebab77a9d2565aed7985b1dd0c5d3f41d /sql
parent72bff0c960590e45d42fcc51d627ccb1c3c055a8 (diff)
parent84000c374b5e4e2fa61a11481bf65a87f0b123a1 (diff)
downloadmariadb-git-2e2198633e71cfb77dc95f17bfe8f6bb88d63424.tar.gz
Merge bk-internal.mysql.com:/home/bk/mysql-5.0
into mockturtle.local:/home/dlenev/src/mysql-5.0-rt-merge mysql-test/r/ps.result: Auto merged mysql-test/t/ps.test: Auto merged sql/item.cc: Auto merged sql/mysql_priv.h: Auto merged sql/sql_select.cc: Auto merged sql/sql_update.cc: Auto merged
Diffstat (limited to 'sql')
-rw-r--r--sql/field.h2
-rw-r--r--sql/filesort.cc1
-rw-r--r--sql/ha_archive.cc34
-rw-r--r--sql/ha_archive.h1
-rw-r--r--sql/item.cc1
-rw-r--r--sql/item_func.cc35
-rw-r--r--sql/item_func.h1
-rw-r--r--sql/item_sum.cc22
-rw-r--r--sql/log_event.cc1
-rw-r--r--sql/mysql_priv.h3
-rw-r--r--sql/set_var.cc13
-rw-r--r--sql/share/errmsg.txt5
-rw-r--r--sql/sql_base.cc5
-rw-r--r--sql/sql_class.cc16
-rw-r--r--sql/sql_class.h48
-rw-r--r--sql/sql_insert.cc58
-rw-r--r--sql/sql_lex.cc3
-rw-r--r--sql/sql_load.cc12
-rw-r--r--sql/sql_parse.cc16
-rw-r--r--sql/sql_select.cc13
-rw-r--r--sql/sql_update.cc4
-rw-r--r--sql/sql_view.cc2
-rw-r--r--sql/sql_yacc.yy8
-rw-r--r--sql/unireg.cc39
24 files changed, 282 insertions, 61 deletions
diff --git a/sql/field.h b/sql/field.h
index 65e747e9d2f..b79c2bf77a8 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1432,6 +1432,8 @@ public:
uint decimals, flags, pack_length, key_length;
Field::utype unireg_check;
TYPELIB *interval; // Which interval to use
+ TYPELIB *save_interval; // Temporary copy for the above
+ // Used only for UCS2 intervals
List<String> interval_list;
CHARSET_INFO *charset;
Field::geometry_type geom_type;
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 42d25dbbaee..f13354d5c72 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1268,6 +1268,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
switch ((sortorder->result_type=sortorder->item->result_type())) {
case STRING_RESULT:
sortorder->length=sortorder->item->max_length;
+ set_if_smaller(sortorder->length, thd->variables.max_sort_length);
if (use_strnxfrm((cs=sortorder->item->collation.collation)))
{
sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length);
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index bb94a99e700..bc3c819c4ed 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -120,6 +120,8 @@ static bool archive_inited= FALSE;
/* Variables for archive share methods */
pthread_mutex_t archive_mutex;
static HASH archive_open_tables;
+static z_off_t max_zfile_size;
+static int zoffset_size;
/* The file extension */
#define ARZ ".ARZ" // The data file
@@ -203,6 +205,18 @@ bool archive_db_init()
}
else
{
+ zoffset_size= 2 << ((zlibCompileFlags() >> 6) & 3);
+ switch (sizeof(z_off_t)) {
+ case 2:
+ max_zfile_size= INT_MAX16;
+ break;
+ case 8:
+ max_zfile_size= LONGLONG_MAX;
+ break;
+ case 4:
+ default:
+ max_zfile_size= INT_MAX32;
+ }
archive_inited= TRUE;
DBUG_RETURN(FALSE);
}
@@ -240,7 +254,7 @@ ha_archive::ha_archive(TABLE *table_arg)
buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
/* The size of the offset value we will use for position() */
- ref_length = 2 << ((zlibCompileFlags() >> 6) & 3);
+ ref_length = zoffset_size;
DBUG_ASSERT(ref_length <= sizeof(z_off_t));
}
@@ -480,7 +494,8 @@ int ha_archive::init_archive_writer()
DBUG_RETURN(1);
}
share->archive_write_open= TRUE;
-
+ info(HA_STATUS_TIME);
+ share->approx_file_size= data_file_length;
DBUG_RETURN(0);
}
@@ -651,10 +666,21 @@ error:
*/
int ha_archive::real_write_row(byte *buf, gzFile writer)
{
- z_off_t written;
+ z_off_t written, total_row_length;
uint *ptr, *end;
DBUG_ENTER("ha_archive::real_write_row");
-
+ total_row_length= table->s->reclength;
+ for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields;
+ ptr != end; ptr++)
+ total_row_length+= ((Field_blob*) table->field[*ptr])->get_length();
+ if (share->approx_file_size > max_zfile_size - total_row_length)
+ {
+ info(HA_STATUS_TIME);
+ share->approx_file_size= data_file_length;
+ if (share->approx_file_size > max_zfile_size - total_row_length)
+ DBUG_RETURN(HA_ERR_RECORD_FILE_FULL);
+ }
+ share->approx_file_size+= total_row_length;
written= gzwrite(writer, buf, table->s->reclength);
DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
if (!delayed_insert || !bulk_insert)
diff --git a/sql/ha_archive.h b/sql/ha_archive.h
index 2bac9fa605e..564b9f03bf5 100644
--- a/sql/ha_archive.h
+++ b/sql/ha_archive.h
@@ -38,6 +38,7 @@ typedef struct st_archive_share {
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
+ z_off_t approx_file_size; /* Approximate archive data file size */
} ARCHIVE_SHARE;
/*
diff --git a/sql/item.cc b/sql/item.cc
index ba13ba985b5..d56ca95093b 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1182,6 +1182,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
split_sum_func(thd, ref_pointer_array, fields);
}
else if ((type() == SUM_FUNC_ITEM || (used_tables() & ~PARAM_TABLE_BIT)) &&
+ type() != SUBSELECT_ITEM &&
(type() != REF_ITEM ||
((Item_ref*)this)->ref_type() == Item_ref::VIEW_REF))
{
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 2e594c74031..e395a7a3af5 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3345,6 +3345,34 @@ longlong Item_func_release_lock::val_int()
}
+bool Item_func_last_insert_id::fix_fields(THD *thd, Item **ref)
+{
+ DBUG_ASSERT(fixed == 0);
+
+ if (Item_int_func::fix_fields(thd, ref))
+ return TRUE;
+
+ if (arg_count == 0)
+ {
+ if (!thd->last_insert_id_used)
+ {
+ /*
+ As this statement calls LAST_INSERT_ID(), set
+ THD::last_insert_id_used and remember first generated insert
+ id of the previous statement in THD::current_insert_id.
+ */
+ thd->last_insert_id_used= TRUE;
+ thd->current_insert_id= thd->last_insert_id;
+ }
+ null_value= FALSE;
+ }
+
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+
+ return FALSE;
+}
+
+
longlong Item_func_last_insert_id::val_int()
{
THD *thd= current_thd;
@@ -3354,12 +3382,13 @@ longlong Item_func_last_insert_id::val_int()
longlong value= args[0]->val_int();
thd->insert_id(value);
null_value= args[0]->null_value;
- return value; // Avoid side effect of insert_id()
+ return value;
}
- thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return thd->last_insert_id_used ? thd->current_insert_id : thd->insert_id();
+
+ return thd->current_insert_id;
}
+
/* This function is just used to test speed of different functions */
longlong Item_func_benchmark::val_int()
diff --git a/sql/item_func.h b/sql/item_func.h
index 177daf0311f..31adc033034 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -891,6 +891,7 @@ public:
if (arg_count)
max_length= args[0]->max_length;
}
+ bool fix_fields(THD *thd, Item **ref);
};
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index bcd8270e52f..5ca1dbba94b 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -246,7 +246,27 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
aggr_sl->inner_sum_func_list->next= this;
}
aggr_sl->inner_sum_func_list= this;
-
+ aggr_sl->with_sum_func= 1;
+
+ /*
+ Mark Item_subselect(s) as containing aggregate function all the way up
+ to aggregate function's calculation context.
+ Note that we must not mark the Item of calculation context itself
+ because with_sum_func on the calculation context st_select_lex is
+ already set above.
+
+ with_sum_func being set for an Item means that this Item refers
+ (somewhere in it, e.g. one of its arguments if it's a function) directly
+ or through intermediate items to an aggregate function that is calculated
+ in a context "outside" of the Item (e.g. in the current or outer select).
+
+ with_sum_func being set for an st_select_lex means that this st_select_lex
+ has aggregate functions directly referenced (i.e. not through a sub-select).
+ */
+ for (sl= thd->lex->current_select;
+ sl && sl != aggr_sl && sl->master_unit()->item;
+ sl= sl->master_unit()->outer_select() )
+ sl->master_unit()->item->with_sum_func= 1;
}
return FALSE;
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 219434ab218..271658d8054 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -3365,7 +3365,6 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli)
{
switch (type) {
case LAST_INSERT_ID_EVENT:
- thd->last_insert_id_used = 1;
thd->last_insert_id = val;
break;
case INSERT_ID_EVENT:
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 11f6375ebe0..c6cc6519d78 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -446,7 +446,8 @@ enum enum_parsing_place
NO_MATTER,
IN_HAVING,
SELECT_LIST,
- IN_WHERE
+ IN_WHERE,
+ IN_ON
};
struct st_table;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index c667e2f2bcc..d00857a2bc1 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -2571,8 +2571,17 @@ bool sys_var_last_insert_id::update(THD *thd, set_var *var)
byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- thd->sys_var_tmp.long_value= (long) thd->insert_id();
- return (byte*) &thd->last_insert_id;
+ if (!thd->last_insert_id_used)
+ {
+ /*
+ As this statement reads @@LAST_INSERT_ID, set
+ THD::last_insert_id_used and remember first generated insert id
+ of the previous statement in THD::current_insert_id.
+ */
+ thd->last_insert_id_used= TRUE;
+ thd->current_insert_id= thd->last_insert_id;
+ }
+ return (byte*) &thd->current_insert_id;
}
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 05b31b1e8b2..5ed7466b0df 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -3813,7 +3813,7 @@ ER_WRONG_MRG_TABLE
cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì"
dan "Tabellerne i MERGE er ikke defineret ens"
nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
- eng "All tables in the MERGE table are not identically defined"
+ eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exists"
est "Kõik tabelid MERGE tabeli määratluses ei ole identsed"
fre "Toutes les tables de la table de type MERGE n'ont pas la même définition"
ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
@@ -5631,3 +5631,6 @@ ER_HOSTNAME
eng "host name"
ER_WRONG_STRING_LENGTH
eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ER_NON_INSERTABLE_TABLE
+ eng "The target table %-.100s of the %s is not insertable-into"
+
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index c29c610b200..85be84d1270 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -902,8 +902,11 @@ void update_non_unique_table_error(TABLE_LIST *update,
*/
if (update->view)
{
+ /* Issue the ER_NON_INSERTABLE_TABLE error for an INSERT */
if (update->view == duplicate->view)
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), update->alias, operation);
+ my_error(!strncmp(operation, "INSERT", 6) ?
+ ER_NON_INSERTABLE_TABLE : ER_NON_UPDATABLE_TABLE, MYF(0),
+ update->alias, operation);
else
my_error(ER_VIEW_PREVENT_UPDATE, MYF(0),
(duplicate->view ? duplicate->alias : update->alias),
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 093173ab949..4d47ec338c0 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -553,10 +553,24 @@ bool THD::store_globals()
}
-/* Cleanup after a query */
+/*
+ Cleanup after query.
+
+ SYNOPSIS
+ THD::cleanup_after_query()
+ DESCRIPTION
+ This function is used to reset thread data to its default state.
+
+ NOTE
+ This function is not suitable for setting thread data to some
+ non-default values, as there is only one replication thread, so
+ different master threads may overwrite data of each other on
+ slave.
+*/
void THD::cleanup_after_query()
{
+ last_insert_id_used= FALSE;
if (clear_next_insert_id)
{
clear_next_insert_id= 0;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 039c133e885..ccc7a661446 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1252,17 +1252,29 @@ public:
ulonglong next_insert_id;
/* Remember last next_insert_id to reset it if something went wrong */
ulonglong prev_insert_id;
+
/*
- The insert_id used for the last statement or set by SET LAST_INSERT_ID=#
- or SELECT LAST_INSERT_ID(#). Used for binary log and returned by
- LAST_INSERT_ID()
+ At the beginning of the statement last_insert_id holds the first
+ generated value of the previous statement. During statement
+ execution it is updated to the value just generated, but then
+ restored to the value that was generated first, so for the next
+ statement it will again be "the first generated value of the
+ previous statement".
+
+ It may also be set with "LAST_INSERT_ID(expr)" or
+ "@@LAST_INSERT_ID= expr", but the effect of such setting will be
+ seen only in the next statement.
*/
ulonglong last_insert_id;
+
/*
- Set to the first value that LAST_INSERT_ID() returned for the last
- statement. When this is set, last_insert_id_used is set to true.
+ current_insert_id remembers the first generated value of the
+ previous statement, and does not change during statement
+ execution. Its value is returned from LAST_INSERT_ID() and
+ @@LAST_INSERT_ID.
*/
ulonglong current_insert_id;
+
ulonglong limit_found_rows;
ulonglong options; /* Bitmap of states */
longlong row_count_func; /* For the ROW_COUNT() function */
@@ -1325,7 +1337,22 @@ public:
bool last_cuted_field;
bool no_errors, password, is_fatal_error;
bool query_start_used, rand_used, time_zone_used;
- bool last_insert_id_used,insert_id_used, clear_next_insert_id;
+
+ /*
+ last_insert_id_used is set when current statement calls
+ LAST_INSERT_ID() or reads @@LAST_INSERT_ID, so that binary log
+ LAST_INSERT_ID_EVENT will be generated.
+ */
+ bool last_insert_id_used;
+
+ /*
+ insert_id_used is set when current statement updates
+ THD::last_insert_id, so that binary log INSERT_ID_EVENT will
+ be generated.
+ */
+ bool insert_id_used;
+
+ bool clear_next_insert_id;
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
@@ -1461,15 +1488,6 @@ public:
insert_id_used=1;
substitute_null_with_insert_id= TRUE;
}
- inline ulonglong insert_id(void)
- {
- if (!last_insert_id_used)
- {
- last_insert_id_used=1;
- current_insert_id=last_insert_id;
- }
- return last_insert_id;
- }
inline ulonglong found_rows(void)
{
return limit_found_rows;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index b5dab814d08..2ce83caa369 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -111,7 +111,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (!table_list->updatable)
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
return -1;
}
@@ -214,7 +214,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
(table_list->view &&
check_view_insertability(thd, table_list)))
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
return -1;
}
@@ -590,10 +590,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
#endif
error=write_record(thd, table ,&info);
/*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
+ If auto_increment values are used, save the first one for
+ LAST_INSERT_ID() and for the update log.
*/
if (! id && thd->insert_id_used)
{ // Get auto increment value
@@ -1303,6 +1301,9 @@ public:
time_t start_time;
bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query;
ulonglong last_insert_id;
+ ulonglong next_insert_id;
+ ulong auto_increment_increment;
+ ulong auto_increment_offset;
timestamp_auto_set_type timestamp_field_type;
uint query_length;
@@ -1684,6 +1685,22 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool igno
row->last_insert_id= thd->last_insert_id;
row->timestamp_field_type= table->timestamp_field_type;
+ /* The session variable settings can always be copied. */
+ row->auto_increment_increment= thd->variables.auto_increment_increment;
+ row->auto_increment_offset= thd->variables.auto_increment_offset;
+ /*
+ Next insert id must be set for the first value in a multi-row insert
+ only. So clear it after the first use. Assume a multi-row insert.
+ Since the user thread doesn't really execute the insert,
+ thd->next_insert_id is left untouched between the rows. If we copy
+ the same insert id to every row of the multi-row insert, the delayed
+ insert thread would copy this before inserting every row. Thus it
+ tries to insert all rows with the same insert id. This fails on the
+ unique constraint. So just the first row would be really inserted.
+ */
+ row->next_insert_id= thd->next_insert_id;
+ thd->next_insert_id= 0;
+
di->rows.push_back(row);
di->stacked_inserts++;
di->status=1;
@@ -2055,6 +2072,14 @@ bool delayed_insert::handle_inserts(void)
thd.insert_id_used=row->insert_id_used;
table->timestamp_field_type= row->timestamp_field_type;
+ /* The session variable settings can always be copied. */
+ thd.variables.auto_increment_increment= row->auto_increment_increment;
+ thd.variables.auto_increment_offset= row->auto_increment_offset;
+ /* Next insert id must be used only if non-zero. */
+ if (row->next_insert_id)
+ thd.next_insert_id= row->next_insert_id;
+ DBUG_PRINT("loop", ("next_insert_id: %lu", (ulong) thd.next_insert_id));
+
info.ignore= row->ignore;
info.handle_duplicates= row->dup;
if (info.ignore ||
@@ -2076,6 +2101,20 @@ bool delayed_insert::handle_inserts(void)
info.error_count++; // Ignore errors
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
row->log_query = 0;
+ /*
+ We must reset next_insert_id. Otherwise all following rows may
+ become duplicates. If write_record() failed on a duplicate and
+ next_insert_id would be left unchanged, the next rows would also
+ be tried with the same insert id and would fail. Since the end
+ of a multi-row statement is unknown here, all following rows in
+ the queue would be dropped, regardless which thread added them.
+ After the queue is used up, next_insert_id is cleared and the
+ next run will succeed. This could even happen if these come from
+ the same multi-row statement as the current queue contents. That
+ way it would look somewhat random which rows are rejected after
+ a duplicate.
+ */
+ thd.next_insert_id= 0;
}
if (using_ignore)
{
@@ -2121,6 +2160,7 @@ bool delayed_insert::handle_inserts(void)
/* This should never happen */
table->file->print_error(error,MYF(0));
sql_print_error("%s",thd.net.last_error);
+ DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop"));
goto err;
}
query_cache_invalidate3(&thd, table, 1);
@@ -2146,6 +2186,7 @@ bool delayed_insert::handle_inserts(void)
{ // This shouldn't happen
table->file->print_error(error,MYF(0));
sql_print_error("%s",thd.net.last_error);
+ DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop"));
goto err;
}
query_cache_invalidate3(&thd, table, 1);
@@ -2153,13 +2194,16 @@ bool delayed_insert::handle_inserts(void)
DBUG_RETURN(0);
err:
+ DBUG_EXECUTE("error", max_rows= 0;);
/* Remove all not used rows */
while ((row=rows.get()))
{
delete row;
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
stacked_inserts--;
+ DBUG_EXECUTE("error", max_rows++;);
}
+ DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows));
thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status);
pthread_mutex_lock(&mutex);
DBUG_RETURN(1);
@@ -2447,7 +2491,7 @@ bool select_insert::send_data(List<Item> &values)
*/
table->next_number_field->reset();
if (!last_insert_id && thd->insert_id_used)
- last_insert_id= thd->insert_id();
+ last_insert_id= thd->last_insert_id;
}
}
DBUG_RETURN(error);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index d0087b14d6a..788276ac654 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1710,7 +1710,8 @@ bool st_lex::can_be_merged()
unit= unit->next_unit())
{
if (unit->first_select()->parent_lex == this &&
- (unit->item == 0 || unit->item->place() != IN_WHERE))
+ (unit->item == 0 ||
+ (unit->item->place() != IN_WHERE && unit->item->place() != IN_ON)))
{
selects_allow_merge= 0;
break;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index d5faf6ee7e9..bdc08b7bd2d 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -616,10 +616,8 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->no_trans_update= no_trans_update;
/*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the binary/update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
+ If auto_increment values are used, save the first one for
+ LAST_INSERT_ID() and for the binary/update log.
*/
if (!id && thd->insert_id_used)
id= thd->last_insert_id;
@@ -784,10 +782,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (write_record(thd, table, &info))
DBUG_RETURN(1);
/*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the binary/update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
+ If auto_increment values are used, save the first one for
+ LAST_INSERT_ID() and for the binary/update log.
*/
if (!id && thd->insert_id_used)
id= thd->last_insert_id;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 18d048df393..1b69e266442 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2422,6 +2422,20 @@ mysql_execute_command(THD *thd)
thd->net.no_send_error= 0;
/*
+ Remember first generated insert id value of the previous
+ statement. We remember it here at the beginning of the statement,
+ and also in Item_func_last_insert_id::fix_fields() and
+ sys_var_last_insert_id::value_ptr(). Last two places are required
+ because LAST_INSERT_ID() and @@LAST_INSERT_ID may also be used in
+ expression that is not executed with mysql_execute_command().
+
+ And we remember it here because some statements read
+ @@LAST_INSERT_ID indirectly, like "SELECT * FROM t1 WHERE id IS
+ NULL", that may replace "id IS NULL" with "id = <LAST_INSERT_ID>".
+ */
+ thd->current_insert_id= thd->last_insert_id;
+
+ /*
In many cases first table of main SELECT_LEX have special meaning =>
check that it is first table in global list and relink it first in
queries_tables list if it is necessary (we need such relinking only
@@ -5636,7 +5650,7 @@ void mysql_reset_thd_for_next_command(THD *thd)
DBUG_ENTER("mysql_reset_thd_for_next_command");
thd->free_list= 0;
thd->select_number= 1;
- thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0;
+ thd->query_start_used= thd->insert_id_used=0;
thd->is_fatal_error= thd->time_zone_used= 0;
thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS |
SERVER_QUERY_NO_INDEX_USED |
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c27bc3b099d..274251f2df3 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -8153,7 +8153,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
Field *field=((Item_field*) args[0])->field;
if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
(thd->options & OPTION_AUTO_IS_NULL) &&
- thd->insert_id() && thd->substitute_null_with_insert_id)
+ thd->current_insert_id && thd->substitute_null_with_insert_id)
{
#ifdef HAVE_QUERY_CACHE
query_cache_abort(&thd->net);
@@ -8161,9 +8161,16 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
new Item_int("last_insert_id()",
- thd->insert_id(),
+ thd->current_insert_id,
21))))
{
+ /*
+ Set THD::last_insert_id_used manually, as this statement
+ uses LAST_INSERT_ID() in a sense, and should issue
+ LAST_INSERT_ID_EVENT.
+ */
+ thd->last_insert_id_used= TRUE;
+
cond=new_cond;
/*
Item_func_eq can't be fixed after creation so we do not check
@@ -11740,8 +11747,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
We must not try to use disabled keys.
*/
usable_keys= table->s->keys_in_use;
- /* we must not consider keys that are disabled by IGNORE INDEX */
- usable_keys.intersect(table->keys_in_use_for_query);
for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
{
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 5405a7fd5c3..a4e05a96f9b 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -568,7 +568,7 @@ int mysql_update(THD *thd,
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ thd->insert_id_used ? thd->last_insert_id : 0L,buff);
DBUG_PRINT("info",("%d records updated",updated));
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
@@ -1561,6 +1561,6 @@ bool multi_update::send_eof()
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
::send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ thd->insert_id_used ? thd->last_insert_id : 0L,buff);
return FALSE;
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 4e2b48d9faf..94c5ad331dd 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1574,7 +1574,7 @@ bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view)
list->push_back(fld);
else
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT");
DBUG_RETURN(TRUE);
}
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 764b6dd53c1..cb105d05332 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -5212,11 +5212,13 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $3))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
add_join_on($3,$6);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref STRAIGHT_JOIN table_factor
ON
@@ -5225,12 +5227,14 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $3))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
$3->straight=1;
add_join_on($3,$6);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref normal_join table_ref
USING
@@ -5254,6 +5258,7 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $5))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
@@ -5261,6 +5266,7 @@ join_table:
Lex->pop_context();
$5->outer_join|=JOIN_TYPE_LEFT;
$$=$5;
+ Select->parsing_place= NO_MATTER;
}
| table_ref LEFT opt_outer JOIN_SYM table_factor
{
@@ -5285,6 +5291,7 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $5))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
@@ -5293,6 +5300,7 @@ join_table:
YYABORT;
add_join_on($$, $8);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref RIGHT opt_outer JOIN_SYM table_factor
{
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 3a139aea4c7..93afd9c9e4e 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -228,13 +228,19 @@ bool mysql_create_frm(THD *thd, my_string file_name,
goto err3;
{
- /* Unescape all UCS2 intervals: were escaped in pack_headers */
+ /*
+ Restore all UCS2 intervals.
+ HEX representation of them is not needed anymore.
+ */
List_iterator<create_field> it(create_fields);
create_field *field;
while ((field=it++))
{
- if (field->interval && field->charset->mbminlen > 1)
- unhex_type2(field->interval);
+ if (field->save_interval)
+ {
+ field->interval= field->save_interval;
+ field->save_interval= 0;
+ }
}
}
DBUG_RETURN(0);
@@ -514,18 +520,36 @@ static bool pack_header(uchar *forminfo, enum db_type table_type,
reclength=(uint) (field->offset+ data_offset + length);
n_length+= (ulong) strlen(field->field_name)+1;
field->interval_id=0;
+ field->save_interval= 0;
if (field->interval)
{
uint old_int_count=int_count;
if (field->charset->mbminlen > 1)
{
- /* Escape UCS2 intervals using HEX notation */
+ /*
+ Escape UCS2 intervals using HEX notation to avoid
+ problems with delimiters between enum elements.
+ As the original representation is still needed in
+ the function make_empty_rec to create a record of
+ filled with default values it is saved in save_interval
+ The HEX representation is created from this copy.
+ */
+ field->save_interval= field->interval;
+ field->interval= (TYPELIB*) sql_alloc(sizeof(TYPELIB));
+ *field->interval= *field->save_interval;
+ field->interval->type_names=
+ (const char **) sql_alloc(sizeof(char*) *
+ (field->interval->count+1));
+ field->interval->type_names[field->interval->count]= 0;
+ field->interval->type_lengths=
+ (uint *) sql_alloc(sizeof(uint) * field->interval->count);
+
for (uint pos= 0; pos < field->interval->count; pos++)
{
char *dst;
- uint length= field->interval->type_lengths[pos], hex_length;
- const char *src= field->interval->type_names[pos];
+ uint length= field->save_interval->type_lengths[pos], hex_length;
+ const char *src= field->save_interval->type_names[pos];
hex_length= length * 2;
field->interval->type_lengths[pos]= hex_length;
field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1);
@@ -777,7 +801,8 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
field->charset,
field->geom_type,
field->unireg_check,
- field->interval,
+ field->save_interval ? field->save_interval :
+ field->interval,
field->field_name,
&table);
if (!regfield)