summary refs log tree commit diff
path: root/sql/sql_update.cc
diff options
context:
space:
mode:
Diffstat (limited to 'sql/sql_update.cc')
-rw-r--r--  sql/sql_update.cc  273
1 file changed, 156 insertions, 117 deletions
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 38638d3aa1d..a8b86448d5e 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -231,7 +231,7 @@ static void prepare_record_for_error_message(int error, TABLE *table)
Get the number of the offended index.
We will see MAX_KEY if the engine cannot determine the affected index.
*/
- if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
+ if (unlikely((keynr= table->file->get_dup_key(error)) >= MAX_KEY))
DBUG_VOID_RETURN;
/* Create unique_map with all fields used by that index. */
@@ -471,8 +471,8 @@ int mysql_update(THD *thd,
set_statistics_for_table(thd, table);
select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
- if (error || !limit || thd->is_error() ||
- (select && select->check_quick(thd, safe_update, limit)))
+ if (unlikely(error || !limit || thd->is_error() ||
+ (select && select->check_quick(thd, safe_update, limit))))
{
query_plan.set_impossible_where();
if (thd->lex->describe || thd->lex->analyze_stmt)
@@ -506,7 +506,7 @@ int mysql_update(THD *thd,
goto err;
}
}
- if (init_ftfuncs(thd, select_lex, 1))
+ if (unlikely(init_ftfuncs(thd, select_lex, 1)))
goto err;
table->mark_columns_needed_for_update();
@@ -724,7 +724,7 @@ int mysql_update(THD *thd,
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
reverse);
- if (error)
+ if (unlikely(error))
{
close_cached_file(&tempfile);
goto err;
@@ -733,7 +733,7 @@ int mysql_update(THD *thd,
THD_STAGE_INFO(thd, stage_searching_rows_for_update);
ha_rows tmp_limit= limit;
- while (!(error=info.read_record()) && !thd->killed)
+ while (likely(!(error=info.read_record())) && likely(!thd->killed))
{
explain->buf_tracker.on_record_read();
thd->inc_examined_row_count(1);
@@ -744,8 +744,8 @@ int mysql_update(THD *thd,
explain->buf_tracker.on_record_after_where();
table->file->position(table->record[0]);
- if (my_b_write(&tempfile,table->file->ref,
- table->file->ref_length))
+ if (unlikely(my_b_write(&tempfile,table->file->ref,
+ table->file->ref_length)))
{
error=1; /* purecov: inspected */
break; /* purecov: inspected */
@@ -763,7 +763,7 @@ int mysql_update(THD *thd,
error since in this case the transaction might have been
rolled back already.
*/
- if (error < 0)
+ if (unlikely(error < 0))
{
/* Fatal error from select->skip_record() */
error= 1;
@@ -773,7 +773,7 @@ int mysql_update(THD *thd,
table->file->unlock_row();
}
}
- if (thd->killed && !error)
+ if (unlikely(thd->killed) && !error)
error= 1; // Aborted
limit= tmp_limit;
table->file->try_semi_consistent_read(0);
@@ -790,14 +790,15 @@ int mysql_update(THD *thd,
}
else
{
- select= new SQL_SELECT;
+ if (!(select= new SQL_SELECT))
+ goto err;
select->head=table;
}
- if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
- error=1; /* purecov: inspected */
- select->file=tempfile; // Read row ptrs from this file
- if (error >= 0)
+ if (unlikely(reinit_io_cache(&tempfile,READ_CACHE,0L,0,0)))
+ error= 1; /* purecov: inspected */
+ select->file= tempfile; // Read row ptrs from this file
+ if (unlikely(error >= 0))
goto err;
table->file->ha_end_keyread();
@@ -831,7 +832,7 @@ update_begin:
/* Direct updating is supported */
DBUG_PRINT("info", ("Using direct update"));
table->reset_default_fields();
- if (!(error= table->file->ha_direct_update_rows(&updated)))
+ if (unlikely(!(error= table->file->ha_direct_update_rows(&updated))))
error= -1;
found= updated;
goto update_end;
@@ -942,11 +943,11 @@ update_begin:
error= table->file->ha_update_row(table->record[1],
table->record[0]);
}
- if (error == HA_ERR_RECORD_IS_THE_SAME)
+ if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
{
error= 0;
}
- else if (!error)
+ else if (likely(!error))
{
if (has_vers_fields && table->versioned())
{
@@ -956,14 +957,15 @@ update_begin:
error= vers_insert_history_row(table);
restore_record(table, record[2]);
}
- if (!error)
+ if (likely(!error))
updated_sys_ver++;
}
- if (!error)
+ if (likely(!error))
updated++;
}
- if (error && (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
+ if (unlikely(error) &&
+ (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
{
/*
If (ignore && error is ignorable) we don't have to
@@ -982,8 +984,8 @@ update_begin:
}
if (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
- TRG_ACTION_AFTER, TRUE))
+ unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER, TRUE)))
{
error= 1;
break;
@@ -1036,7 +1038,7 @@ update_begin:
Don't try unlocking the row if skip_record reported an error since in
this case the transaction might have been rolled back already.
*/
- else if (!thd->is_error())
+ else if (likely(!thd->is_error()))
table->file->unlock_row();
else
{
@@ -1044,7 +1046,7 @@ update_begin:
break;
}
thd->get_stmt_da()->inc_current_row_for_warning();
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
break;
@@ -1069,7 +1071,7 @@ update_begin:
};);
error= (killed_status == NOT_KILLED)? error : 1;
- if (error &&
+ if (likely(error) &&
will_batch &&
(loc_error= table->file->exec_bulk_update(&dup_key_found)))
/*
@@ -1127,12 +1129,12 @@ update_end:
Sometimes we want to binlog even if we updated no rows, in case user used
it to be sure master and slave are in same state.
*/
- if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
+ if (likely(error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
{
int errcode= 0;
- if (error < 0)
+ if (likely(error < 0))
thd->clear_error();
else
errcode= query_error_code(thd, killed_status == NOT_KILLED);
@@ -1161,7 +1163,7 @@ update_end:
id= thd->arg_of_last_insert_id_function ?
thd->first_successful_insert_id_in_prev_stmt : 0;
- if (error < 0 && !thd->lex->analyze_stmt)
+ if (likely(error < 0) && likely(!thd->lex->analyze_stmt))
{
char buff[MYSQL_ERRMSG_SIZE];
if (!table->versioned(VERS_TIMESTAMP))
@@ -1187,7 +1189,7 @@ update_end:
*found_return= found;
*updated_return= updated;
- if (thd->lex->analyze_stmt)
+ if (unlikely(thd->lex->analyze_stmt))
goto emit_explain_and_leave;
DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
@@ -1207,7 +1209,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
- if (!query_plan.save_explain_update_data(query_plan.mem_root, thd))
+ if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd)))
goto err;
emit_explain_and_leave:
@@ -1863,7 +1865,7 @@ int multi_update::prepare(List<Item> &not_used_values,
bitmap_union(table->read_set, &table->tmp_set);
}
}
- if (error)
+ if (unlikely(error))
DBUG_RETURN(1);
/*
@@ -1907,14 +1909,14 @@ int multi_update::prepare(List<Item> &not_used_values,
table_count);
values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
table_count);
- if (thd->is_fatal_error)
+ if (unlikely(thd->is_fatal_error))
DBUG_RETURN(1);
for (i=0 ; i < table_count ; i++)
{
fields_for_table[i]= new List_item;
values_for_table[i]= new List_item;
}
- if (thd->is_fatal_error)
+ if (unlikely(thd->is_fatal_error))
DBUG_RETURN(1);
/* Split fields into fields_for_table[] and values_by_table[] */
@@ -1926,7 +1928,7 @@ int multi_update::prepare(List<Item> &not_used_values,
fields_for_table[offset]->push_back(item, thd->mem_root);
values_for_table[offset]->push_back(value, thd->mem_root);
}
- if (thd->is_fatal_error)
+ if (unlikely(thd->is_fatal_error))
DBUG_RETURN(1);
/* Allocate copy fields */
@@ -2058,7 +2060,8 @@ multi_update::initialize_tables(JOIN *join)
TABLE_LIST *table_ref;
DBUG_ENTER("initialize_tables");
- if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+ if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
+ error_if_full_join(join)))
DBUG_RETURN(1);
main_table=join->join_tab->table;
table_to_update= 0;
@@ -2164,22 +2167,12 @@ loop_end:
tbl->prepare_for_position();
join->map2table[tbl->tablenr]->keep_current_rowid= true;
- Field_string *field= new Field_string(tbl->file->ref_length, 0,
- &field_name,
- &my_charset_bin);
- if (!field)
- DBUG_RETURN(1);
- field->init(tbl);
- /*
- The field will be converted to varstring when creating tmp table if
- table to be updated was created by mysql 4.1. Deny this.
- */
- field->can_alter_field_type= 0;
- Item_field *ifield= new (thd->mem_root) Item_field(join->thd, (Field *) field);
- if (!ifield)
+ Item_temptable_rowid *item=
+ new (thd->mem_root) Item_temptable_rowid(tbl);
+ if (!item)
DBUG_RETURN(1);
- ifield->maybe_null= 0;
- if (temp_fields.push_back(ifield, thd->mem_root))
+ item->fix_fields(thd, 0);
+ if (temp_fields.push_back(item, thd->mem_root))
DBUG_RETURN(1);
} while ((tbl= tbl_it++));
@@ -2190,10 +2183,10 @@ loop_end:
group.direction= ORDER::ORDER_ASC;
group.item= (Item**) temp_fields.head_ref();
- tmp_param->quick_group=1;
- tmp_param->field_count=temp_fields.elements;
- tmp_param->group_parts=1;
- tmp_param->group_length= table->file->ref_length;
+ tmp_param->quick_group= 1;
+ tmp_param->field_count= temp_fields.elements;
+ tmp_param->func_count= temp_fields.elements - 1;
+ calc_group_buffer(tmp_param, &group);
/* small table, ignore SQL_BIG_TABLES */
my_bool save_big_tables= thd->variables.big_tables;
thd->variables.big_tables= FALSE;
@@ -2205,10 +2198,66 @@ loop_end:
DBUG_RETURN(1);
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
+ join->tmp_table_keep_current_rowid= TRUE;
DBUG_RETURN(0);
}
+static TABLE *item_rowid_table(Item *item)
+{
+ if (item->type() != Item::FUNC_ITEM)
+ return NULL;
+ Item_func *func= (Item_func *)item;
+ if (func->functype() != Item_func::TEMPTABLE_ROWID)
+ return NULL;
+ Item_temptable_rowid *itr= (Item_temptable_rowid *)func;
+ return itr->table;
+}
+
+
+/*
+ multi_update stores a rowid and new field values for every updated row in a
+ temporary table (one temporary table per updated table). These rowids are
+ obtained via Item_temptable_rowid's by calling handler::position(). But if
+ the join is resolved via a temp table, rowids cannot be obtained from
+ handler::position() in the multi_update::send_data(). So, they're stored in
+ the join's temp table (JOIN::add_fields_for_current_rowid()) and here we
+ replace Item_temptable_rowid's (that would've done handler::position()) with
+ Item_field's (that will simply take the corresponding field value from the
+ temp table).
+*/
+int multi_update::prepare2(JOIN *join)
+{
+ if (!join->need_tmp || !join->tmp_table_keep_current_rowid)
+ return 0;
+
+ // there cannot be many tmp tables in multi-update
+ JOIN_TAB *tmptab= join->join_tab + join->exec_join_tab_cnt();
+
+ for (Item **it= tmptab->tmp_table_param->items_to_copy; *it ; it++)
+ {
+ TABLE *tbl= item_rowid_table(*it);
+ if (!tbl)
+ continue;
+ for (uint i= 0; i < table_count; i++)
+ {
+ for (Item **it2= tmp_table_param[i].items_to_copy; *it2; it2++)
+ {
+ if (item_rowid_table(*it2) != tbl)
+ continue;
+ Item *fld= new (thd->mem_root)
+ Item_field(thd, (*it)->get_tmp_table_field());
+ if (!fld)
+ return 1;
+ fld->set_result_field((*it2)->get_tmp_table_field());
+ *it2= fld;
+ }
+ }
+ }
+ return 0;
+}
+
+
multi_update::~multi_update()
{
TABLE_LIST *table;
@@ -2295,7 +2344,8 @@ int multi_update::send_data(List<Item> &not_used_values)
{
int error;
- if (table->default_field && table->update_default_fields(1, ignore))
+ if (table->default_field &&
+ unlikely(table->update_default_fields(1, ignore)))
DBUG_RETURN(1);
if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -2304,10 +2354,10 @@ int multi_update::send_data(List<Item> &not_used_values)
found--;
if (error == VIEW_CHECK_SKIP)
continue;
- else if (error == VIEW_CHECK_ERROR)
+ else if (unlikely(error == VIEW_CHECK_ERROR))
DBUG_RETURN(1);
}
- if (!updated++)
+ if (unlikely(!updated++))
{
/*
Inform the main table that we are going to update the table even
@@ -2316,8 +2366,8 @@ int multi_update::send_data(List<Item> &not_used_values)
*/
main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
}
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])) &&
+ if (unlikely((error=table->file->ha_update_row(table->record[1],
+ table->record[0]))) &&
error != HA_ERR_RECORD_IS_THE_SAME)
{
updated--;
@@ -2340,7 +2390,7 @@ int multi_update::send_data(List<Item> &not_used_values)
}
else
{
- if (error == HA_ERR_RECORD_IS_THE_SAME)
+ if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
{
error= 0;
updated--;
@@ -2372,55 +2422,42 @@ int multi_update::send_data(List<Item> &not_used_values)
}
}
if (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
- TRG_ACTION_AFTER, TRUE))
+ unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER, TRUE)))
DBUG_RETURN(1);
}
else
{
int error;
TABLE *tmp_table= tmp_tables[offset];
- /*
- For updatable VIEW store rowid of the updated table and
- rowids of tables used in the CHECK OPTION condition.
- */
- uint field_num= 0;
- List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
- /* Set first tbl = table and then tbl to tables from tbl_it */
- TABLE *tbl= table;
- do
- {
- tbl->file->position(tbl->record[0]);
- memcpy((char*) tmp_table->field[field_num]->ptr,
- (char*) tbl->file->ref, tbl->file->ref_length);
- /*
- For outer joins a rowid field may have no NOT_NULL_FLAG,
- so we have to reset NULL bit for this field.
- (set_notnull() resets NULL bit only if available).
- */
- tmp_table->field[field_num]->set_notnull();
- field_num++;
- } while ((tbl= tbl_it++));
-
+ if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
+ DBUG_RETURN(1);
/* Store regular updated fields in the row. */
+ DBUG_ASSERT(1 + unupdated_check_opt_tables.elements ==
+ tmp_table_param[offset].func_count);
fill_record(thd, tmp_table,
tmp_table->field + 1 + unupdated_check_opt_tables.elements,
*values_for_table[offset], TRUE, FALSE);
/* Write row, ignoring duplicated updates to a row */
error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
- if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
+ found++;
+ if (unlikely(error))
{
- if (error &&
- create_internal_tmp_table_from_heap(thd, tmp_table,
- tmp_table_param[offset].start_recinfo,
- &tmp_table_param[offset].recinfo,
- error, 1, NULL))
+ found--;
+ if (error != HA_ERR_FOUND_DUPP_KEY &&
+ error != HA_ERR_FOUND_DUPP_UNIQUE)
{
- do_update= 0;
- DBUG_RETURN(1); // Not a table_is_full error
+ if (create_internal_tmp_table_from_heap(thd, tmp_table,
+ tmp_table_param[offset].start_recinfo,
+ &tmp_table_param[offset].recinfo,
+ error, 1, NULL))
+ {
+ do_update= 0;
+ DBUG_RETURN(1); // Not a table_is_full error
+ }
+ found++;
}
- found++;
}
}
}
@@ -2431,8 +2468,8 @@ int multi_update::send_data(List<Item> &not_used_values)
void multi_update::abort_result_set()
{
/* the error was handled or nothing deleted and no side effects return */
- if (error_handled ||
- (!thd->transaction.stmt.modified_non_trans_table && !updated))
+ if (unlikely(error_handled ||
+ (!thd->transaction.stmt.modified_non_trans_table && !updated)))
return;
/* Something already updated so we have to invalidate cache */
@@ -2522,7 +2559,7 @@ int multi_update::do_updates()
org_updated= updated;
tmp_table= tmp_tables[cur_table->shared];
tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
- if ((local_error= table->file->ha_rnd_init(0)))
+ if (unlikely((local_error= table->file->ha_rnd_init(0))))
{
err_table= table;
goto err;
@@ -2545,7 +2582,7 @@ int multi_update::do_updates()
check_opt_it.rewind();
while(TABLE *tbl= check_opt_it++)
{
- if ((local_error= tbl->file->ha_rnd_init(1)))
+ if (unlikely((local_error= tbl->file->ha_rnd_init(1))))
{
err_table= tbl;
goto err;
@@ -2569,7 +2606,7 @@ int multi_update::do_updates()
}
copy_field_end=copy_field_ptr;
- if ((local_error= tmp_table->file->ha_rnd_init(1)))
+ if (unlikely((local_error= tmp_table->file->ha_rnd_init(1))))
{
err_table= tmp_table;
goto err;
@@ -2584,12 +2621,11 @@ int multi_update::do_updates()
thd->fatal_error();
goto err2;
}
- if ((local_error= tmp_table->file->ha_rnd_next(tmp_table->record[0])))
+ if (unlikely((local_error=
+ tmp_table->file->ha_rnd_next(tmp_table->record[0]))))
{
if (local_error == HA_ERR_END_OF_FILE)
break;
- if (local_error == HA_ERR_RECORD_DELETED)
- continue; // May happen on dup key
err_table= tmp_table;
goto err;
}
@@ -2600,9 +2636,10 @@ int multi_update::do_updates()
uint field_num= 0;
do
{
- if ((local_error=
- tbl->file->ha_rnd_pos(tbl->record[0],
- (uchar *) tmp_table->field[field_num]->ptr)))
+ if (unlikely((local_error=
+ tbl->file->ha_rnd_pos(tbl->record[0],
+ (uchar *) tmp_table->
+ field[field_num]->ptr))))
{
err_table= tbl;
goto err;
@@ -2611,8 +2648,8 @@ int multi_update::do_updates()
} while ((tbl= check_opt_it++));
if (table->vfield &&
- table->update_virtual_fields(table->file,
- VCOL_UPDATE_INDEXED_FOR_UPDATE))
+ unlikely(table->update_virtual_fields(table->file,
+ VCOL_UPDATE_INDEXED_FOR_UPDATE)))
goto err2;
table->status|= STATUS_UPDATED;
@@ -2646,7 +2683,7 @@ int multi_update::do_updates()
{
if (error == VIEW_CHECK_SKIP)
continue;
- else if (error == VIEW_CHECK_ERROR)
+ else if (unlikely(error == VIEW_CHECK_ERROR))
{
thd->fatal_error();
goto err2;
@@ -2655,8 +2692,9 @@ int multi_update::do_updates()
if (has_vers_fields && table->versioned())
table->vers_update_fields();
- if ((local_error=table->file->ha_update_row(table->record[1],
- table->record[0])) &&
+ if (unlikely((local_error=
+ table->file->ha_update_row(table->record[1],
+ table->record[0]))) &&
local_error != HA_ERR_RECORD_IS_THE_SAME)
{
if (!ignore ||
@@ -2691,8 +2729,8 @@ int multi_update::do_updates()
}
if (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
- TRG_ACTION_AFTER, TRUE))
+ unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER, TRUE)))
goto err2;
}
@@ -2762,7 +2800,7 @@ bool multi_update::send_eof()
error takes into account killed status gained in do_updates()
*/
int local_error= thd->is_error();
- if (!local_error)
+ if (likely(!local_error))
local_error = (table_count) ? do_updates() : 0;
/*
if local_error is not set ON until after do_updates() then
@@ -2792,12 +2830,13 @@ bool multi_update::send_eof()
thd->transaction.all.m_unsafe_rollback_flags|=
(thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
- if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
+ if (likely(local_error == 0 ||
+ thd->transaction.stmt.modified_non_trans_table))
{
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
{
int errcode= 0;
- if (local_error == 0)
+ if (likely(local_error == 0))
thd->clear_error();
else
errcode= query_error_code(thd, killed_status == NOT_KILLED);
@@ -2824,10 +2863,10 @@ bool multi_update::send_eof()
DBUG_ASSERT(trans_safe || !updated ||
thd->transaction.stmt.modified_non_trans_table);
- if (local_error != 0)
+ if (likely(local_error != 0))
error_handled= TRUE; // to force early leave from ::abort_result_set()
- if (local_error > 0) // if the above log write did not fail ...
+ if (unlikely(local_error > 0)) // if the above log write did not fail ...
{
/* Safety: If we haven't got an error before (can happen in do_updates) */
my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",