author    Nikita Malyavin <nikitamalyavin@gmail.com>  2023-01-18 14:49:35 +0300
committer Nikita Malyavin <nikitamalyavin@gmail.com>  2023-01-18 14:49:35 +0300
commit    485ed96f56f659530d5bc4f911196252913899d0 (patch)
tree      5818dd4f38772b7145c0909c425be0e6d513acd7
parent    15f86d37374afece5017a497918797464e47884c (diff)
download  mariadb-git-bb-10.5-nikita-MDEV-30046.tar.gz
Refactor REPLACE (branch bb-10.5-nikita-MDEV-30046)
-rw-r--r--  sql/log_event_server.cc   18
-rw-r--r--  sql/sql_insert.cc        696
-rw-r--r--  sql/sql_insert.h         119
3 files changed, 473 insertions, 360 deletions
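The patch folds the old write_record()/replace_row() free functions into a
Write_record object that selects a per-row routine once per statement, based
on info->handle_duplicates, and exposes it as a member function pointer. A
minimal caller-side sketch of the intended pattern; the driver loop and
fetch_next_source_row() are hypothetical stand-ins for the INSERT machinery,
not part of the patch:

    // One Write_record per statement; one indirect call per row. The
    // constructor resolves DUP_ERROR/DUP_UPDATE/DUP_REPLACE to
    // single_insert(), update_duplicate() or replace_row<>() up front,
    // so the per-row path carries no switch on handle_duplicates.
    Write_record writer(thd, table, &info,
                        table->versioned(VERS_TIMESTAMP),
                        table->triggers != NULL,
                        returning_sink);
    while (fetch_next_source_row(table->record[0]))   // hypothetical
    {
      if ((writer.*writer.write_record)())
        return 1;                       // error already reported
    }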
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 761cca69028..a1c9542690d 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -7337,27 +7337,13 @@ Rows_log_event::write_row(rpl_group_info *rgi,
DBUG_DUMP("record[0] (after)", table->record[0], table->s->reclength);
COPY_INFO info;
- auto result= replace_row(table, keynum, &info, invoke_triggers,
+ int error= replace_row(table, keynum, &info, invoke_triggers,
m_vers_from_plain &&
m_table->versioned(VERS_TIMESTAMP));
-  if (result.updated)
+  if (error)
{
- switch (result.error)
- {
- case HA_ERR_RECORD_IS_THE_SAME:
- DBUG_PRINT("info", ("ignoring HA_ERR_RECORD_IS_THE_SAME error from"
- " ha_update_row()"));
- result.error= 0;
- // fall through
- case 0:
- break;
-
- default:
- DBUG_PRINT("info", ("ha_update_row() returns error %d", result.error));
- table->file->print_error(result.error, MYF(0));
- }
- DBUG_RETURN(result.error);
+ DBUG_RETURN(error);
}
-  if (result.error)
+  if (error)
{
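With replace_execution_result gone, the replication caller sees a single int:
HA_ERR_RECORD_IS_THE_SAME is absorbed inside Write_record::replace_row(), and
any other failure has already been passed through handler::print_error() by
on_error(). A hedged sketch of the resulting call-site shape (the remaining
lines of Rows_log_event::write_row() lie outside this hunk):

    // Assumed post-refactor contract: replace_row() both reports and
    // returns the error, so the event applier just propagates it.
    int error= replace_row(table, keynum, &info, invoke_triggers,
                           m_vers_from_plain &&
                           m_table->versioned(VERS_TIMESTAMP));
    DBUG_RETURN(error);   // 0 on success; non-zero already printed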
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 234579fcce9..5819dbe22f5 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1727,9 +1727,8 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
}
- /* Check if there is more uniq keys after field */
-
-static int last_uniq_key(TABLE *table,uint keynr)
+/* Return the index of the last unique key, or ~0U when there is none */
+uint Write_record::get_last_unique_key(TABLE *table)
{
/*
When an underlying storage engine informs that the unique key
@@ -1746,12 +1745,12 @@ static int last_uniq_key(TABLE *table,uint keynr)
a delete + insert always.
*/
if (table->file->ha_table_flags() & HA_DUPLICATE_KEY_NOT_IN_ORDER)
- return 0;
+ return ~0U;
- while (++keynr < table->s->keys)
- if (table->key_info[keynr].flags & HA_NOSAME)
- return 0;
- return 1;
+  for (int key= (int) table->s->keys - 1; key >= 0; --key)
+    if (table->key_info[key].flags & HA_NOSAME)
+      return (uint) key;
+  return ~0U;
}
@@ -1795,7 +1794,7 @@ int vers_insert_history_row(TABLE *table)
- use ha_index_read_idx_map() with the key that is duplicated, to
retrieve the offending row.
*/
-int locate_dup_record(THD *thd, TABLE *table, uchar *&key, uint keynum)
+int Write_record::locate_dup_record()
{
handler *h= table->file;
int error= 0;
@@ -1835,9 +1834,9 @@ int locate_dup_record(THD *thd, TABLE *table, uchar *&key, uint keynum)
}
}
- key_copy(key, table->record[0], table->key_info + keynum, 0);
+ key_copy(key, table->record[0], table->key_info + key_nr, 0);
- error= h->ha_index_read_idx_map(table->record[1], keynum, key,
+ error= h->ha_index_read_idx_map(table->record[1], key_nr, key,
HA_WHOLE_KEY, HA_READ_KEY_EXACT);
if (unlikely(error))
{
@@ -1862,7 +1861,18 @@ int locate_dup_record(THD *thd, TABLE *table, uchar *&key, uint keynum)
return error;
}
+template<class F>
+class On_exit
+{
+  F func; /* owned by value: a const F& member would dangle once the
+             temporary lambda at the call site is destroyed */
+public:
+  On_exit(F func): func(std::move(func)) {}
+  ~On_exit() { func(); }
+};
+
+template<class F>
+static On_exit<F> on_exit(F func)
+{ return On_exit<F>(std::move(func)); }
/**
REPLACE is defined as either INSERT or DELETE + INSERT. If
@@ -1880,366 +1890,391 @@ int locate_dup_record(THD *thd, TABLE *table, uchar *&key, uint keynum)
* when we have referenced keys, as there can be different ON DELETE/ON UPDATE
actions
*/
-replace_execution_result replace_row(TABLE *table, uint key_nr,
- COPY_INFO *info, bool use_triggers,
- bool versioned)
+template<bool can_optimize>
+int Write_record::replace_row()
{
- replace_execution_result result{};
- if (last_uniq_key(table, key_nr) &&
- !table->file->referenced_by_foreign_key() && use_triggers)
+  int error;
+  prev_insert_id= table->file->next_insert_id;
+  save_read_set= table->read_set;
+  save_write_set= table->write_set;
+  info->records++;
+  while (unlikely(error= table->file->ha_write_row(table->record[0])))
{
- DBUG_PRINT("info", ("Updating row using ha_update_row()"));
- if (table->versioned(VERS_TRX_ID))
+ error= prepare_handle_duplicate(error);
+ if (unlikely(error))
+ return error;
+
+ if (can_optimize && key_nr == last_unique_key)
{
- bitmap_set_bit(table->write_set, table->vers_start_field()->field_index);
- table->file->column_bitmaps_signal();
- table->vers_start_field()->store(0, false);
+ DBUG_PRINT("info", ("Updating row using ha_update_row()"));
+ if (table->versioned(VERS_TRX_ID))
+ {
+ bitmap_set_bit(table->write_set,
+ table->vers_start_field()->field_index);
+ table->file->column_bitmaps_signal();
+ table->vers_start_field()->store(0, false);
+ }
+      error= table->file->ha_update_row(table->record[1], table->record[0]);
+
+ if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
+ {
+ DBUG_PRINT("info", ("ha_update_row() returns error %d", error));
+ return on_error(error);
+ }
+ else if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
+ {
+ DBUG_PRINT("info", ("ignoring HA_ERR_RECORD_IS_THE_SAME error from"
+ " ha_update_row()"));
+ }
+ else
+ {
+ info->deleted++;
+ if (!table->file->has_transactions())
+          table->in_use->transaction->stmt.modified_non_trans_table= TRUE;
+        if (versioned)
+        {
+          store_record(table, record[2]);
+          error= vers_insert_history_row(table);
+ restore_record(table, record[2]);
+ if (unlikely(error))
+ return on_error(error);
+ }
+ }
+      break;
}
- result.error= table->file->ha_update_row(table->record[1], table->record[0]);
- if (unlikely(result.error) && result.error != HA_ERR_RECORD_IS_THE_SAME)
- return result;
- if (likely(!result.error))
+ else
{
- info->deleted++;
- if (!table->file->has_transactions())
- table->in_use->transaction->stmt.modified_non_trans_table= TRUE;
- if (versioned)
+ DBUG_PRINT("info", ("Deleting offending row and trying to write"
+ " new one again"));
+
+      auto *trg= table->triggers;
+      if (use_triggers &&
+          trg->process_triggers(table->in_use, TRG_EVENT_DELETE,
+                                TRG_ACTION_BEFORE, TRUE))
+ return restore_on_error();
+ if (!versioned)
+        error= table->file->ha_delete_row(table->record[1]);
+ else
{
store_record(table, record[2]);
- result.error= vers_insert_history_row(table);
+ restore_record(table, record[1]);
+ table->vers_update_end();
+        error= table->file->ha_update_row(table->record[1], table->record[0]);
restore_record(table, record[2]);
}
- }
- else
- result.error= 0; // error was HA_ERR_RECORD_IS_THE_SAME
- result.updated= true;
- }
- else
- {
- DBUG_PRINT("info", ("Deleting offending row and trying to write"
- " new one again"));
- if (table->triggers &&
- table->triggers->process_triggers(table->in_use, TRG_EVENT_DELETE,
- TRG_ACTION_BEFORE, TRUE))
- {
- result.before_trg_error= true;
- return result;
- }
-
- if (!versioned)
- result.error= table->file->ha_delete_row(table->record[1]);
- else
- {
- store_record(table, record[2]);
- restore_record(table, record[1]);
- table->vers_update_end();
- result.error= table->file->ha_update_row(table->record[1],
- table->record[0]);
- restore_record(table, record[2]);
- }
+ if (unlikely(error))
+ {
+ DBUG_PRINT("info", ("ha_delete_row() returns error %d", error));
+ return on_error(error);
+ }
+ if (!versioned)
+ info->deleted++;
+ else
+ info->updated++;
+ if (!table->file->has_transactions_and_rollback())
+        table->in_use->transaction->stmt.modified_non_trans_table= TRUE;
+ if (use_triggers)
+ {
+        after_trg_error= trg->process_triggers(table->in_use,
+                                               TRG_EVENT_DELETE,
+                                               TRG_ACTION_AFTER, TRUE);
+        if (unlikely(after_trg_error))
+        {
+          notify_non_trans_table_modified();
+          return 1;
+ }
+ }
- if (unlikely(result.error))
- {
- DBUG_PRINT("info", ("ha_delete_row() returns error %d", result.error));
- return result;
+ /*
+ If more than one iteration of the above while loop is done, from
+ the second one the row being inserted will have an explicit
+ value in the autoinc field, which was set at the first call of
+ handler::update_auto_increment(). This value is saved to avoid
+ thd->insert_id_for_cur_row becoming 0. Use this saved autoinc
+ value.
+ */
+ if (table->file->insert_id_for_cur_row == 0)
+        table->file->insert_id_for_cur_row= insert_id_for_cur_row;
}
- if (!versioned)
- info->deleted++;
- else
- info->updated++;
- if (!table->file->has_transactions_and_rollback())
- table->in_use->transaction->stmt.modified_non_trans_table= TRUE;
- if (table->triggers &&
- table->triggers->process_triggers(table->in_use, TRG_EVENT_DELETE,
- TRG_ACTION_AFTER, TRUE))
- result.after_trg_error= true;
}
- return result;
+ after_insert();
+  return after_trg_error;
}
-/*
- Write a record to table with optional deleting of conflicting records,
- invoke proper triggers if needed.
+int Write_record::update_duplicate()
+{
+  prev_insert_id= table->file->next_insert_id;
+  save_read_set= table->read_set;
+  save_write_set= table->write_set;
+  info->records++;
+
+  int res= table->file->ha_write_row(table->record[0]);
+  if (likely(!res))
+  {
+    after_insert();
+    return after_trg_error;
+  }
- SYNOPSIS
- write_record()
- thd - thread context
- table - table to which record should be written
- info - COPY_INFO structure describing handling of duplicates
- and which is used for counting number of records inserted
- and deleted.
- sink - result sink for the RETURNING clause
-
- NOTE
- Once this record will be written to table after insert trigger will
- be invoked. If instead of inserting new record we will update old one
- then both on update triggers will work instead. Similarly both on
- delete triggers will be invoked if we will delete conflicting records.
-
- Sets thd->transaction.stmt.modified_non_trans_table to TRUE if table which
- is updated didn't have transactions.
-
- RETURN VALUE
- 0 - success
- non-0 - error
-*/
+ DBUG_ASSERT(table->read_set == &table->def_read_set);
+ DBUG_ASSERT(table->write_set == &table->def_write_set);
+ auto def_bitmaps= on_exit([this](){ table->default_column_bitmaps(); });
+  res= prepare_handle_duplicate(res);
+ if (unlikely(res))
+ return res;
-int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
-{
- int error, trg_error= 0;
- uchar *key=0;
- MY_BITMAP *save_read_set, *save_write_set;
- ulonglong prev_insert_id= table->file->next_insert_id;
- ulonglong insert_id_for_cur_row= 0;
ulonglong prev_insert_id_for_cur_row= 0;
- DBUG_ENTER("write_record");
+ /*
+    We don't check for other UNIQUE keys - the first row
+    that matches is updated. If the update causes a conflict again,
+    an error is returned.
+ */
+ DBUG_ASSERT(table->insert_values != NULL);
+ store_record(table,insert_values);
+ restore_record(table,record[1]);
+ table->reset_default_fields();
- info->records++;
- save_read_set= table->read_set;
- save_write_set= table->write_set;
+ /*
+    In INSERT ... ON DUPLICATE KEY UPDATE the set of modified fields can
+ change per row. Thus, we have to do reset_default_fields() per row.
+ Twice (before insert and before update).
+ */
+ DBUG_ASSERT(info->update_fields->elements ==
+ info->update_values->elements);
+ if (fill_record_n_invoke_before_triggers(thd, table,
+ *info->update_fields,
+ *info->update_values,
+ info->ignore,
+ TRG_EVENT_UPDATE))
+ return restore_on_error();
+
+ bool different_records= (!records_are_comparable(table) ||
+ compare_record(table));
+ /*
+ Default fields must be updated before checking view updateability.
+ This branch of INSERT is executed only when a UNIQUE key was violated
+ with the ON DUPLICATE KEY UPDATE option. In this case the INSERT
+ operation is transformed to an UPDATE, and the default fields must
+ be updated as if this is an UPDATE.
+ */
+ if (different_records && table->default_field)
+ table->evaluate_update_default_function();
- if (info->handle_duplicates == DUP_REPLACE ||
- info->handle_duplicates == DUP_UPDATE)
+ /* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */
+ res= info->table_list->view_check_option(table->in_use, info->ignore);
+ if (unlikely(res))
{
- while (unlikely(error=table->file->ha_write_row(table->record[0])))
+ if (res == VIEW_CHECK_ERROR)
+ return restore_on_error();
+ DBUG_ASSERT(res == VIEW_CHECK_SKIP);
+ return 0;
+ }
+
+ table->file->restore_auto_increment(prev_insert_id);
+ info->touched++;
+ if (different_records)
+ {
+ int error= table->file->ha_update_row(table->record[1], table->record[0]);
+
+ if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
{
- uint key_nr;
- /*
- If we do more than one iteration of this loop, from the second one the
- row will have an explicit value in the autoinc field, which was set at
- the first call of handler::update_auto_increment(). So we must save
- the autogenerated value to avoid thd->insert_id_for_cur_row to become
- 0.
- */
- if (table->file->insert_id_for_cur_row > 0)
- insert_id_for_cur_row= table->file->insert_id_for_cur_row;
- else
- table->file->insert_id_for_cur_row= insert_id_for_cur_row;
- bool is_duplicate_key_error;
- if (table->file->is_fatal_error(error, HA_CHECK_ALL))
- goto err;
- is_duplicate_key_error=
- table->file->is_fatal_error(error, HA_CHECK_ALL & ~HA_CHECK_DUP);
- if (!is_duplicate_key_error)
+ if (info->ignore && !table->file->is_fatal_error(error, HA_CHECK_ALL))
{
- /*
- We come here when we had an ignorable error which is not a duplicate
- key error. In this we ignore error if ignore flag is set, otherwise
- report error as usual. We will not do any duplicate key processing.
- */
- if (info->ignore)
- {
+ if (!(thd->variables.old_behavior &
+ OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE))
table->file->print_error(error, MYF(ME_WARNING));
- goto after_trg_or_ignored_err; /* Ignoring a not fatal error */
- }
- goto err;
- }
- if (unlikely((int) (key_nr = table->file->get_dup_key(error)) < 0))
- {
- error= HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */
- goto err;
+ notify_non_trans_table_modified();
+ return 0;
}
- DEBUG_SYNC(thd, "write_row_replace");
-
- /* Read all columns for the row we are going to replace */
- table->use_all_columns();
- /*
- Don't allow REPLACE to replace a row when a auto_increment column
- was used. This ensures that we don't get a problem when the
- whole range of the key has been used.
- */
- if (info->handle_duplicates == DUP_REPLACE && table->next_number_field &&
- key_nr == table->s->next_number_index && insert_id_for_cur_row > 0)
- goto err;
-
- error= locate_dup_record(thd, table, key, key_nr);
- if (error)
- goto err;
+ return on_error(error);
+ }
- if (info->handle_duplicates == DUP_UPDATE)
+ if (error != HA_ERR_RECORD_IS_THE_SAME)
+ {
+ info->updated++;
+ if (table->versioned())
{
- int res= 0;
- /*
- We don't check for other UNIQUE keys - the first row
- that matches, is updated. If update causes a conflict again,
- an error is returned
- */
- DBUG_ASSERT(table->insert_values != NULL);
- store_record(table,insert_values);
- restore_record(table,record[1]);
- table->reset_default_fields();
-
- /*
- in INSERT ... ON DUPLICATE KEY UPDATE the set of modified fields can
- change per row. Thus, we have to do reset_default_fields() per row.
- Twice (before insert and before update).
- */
- DBUG_ASSERT(info->update_fields->elements ==
- info->update_values->elements);
- if (fill_record_n_invoke_before_triggers(thd, table,
- *info->update_fields,
- *info->update_values,
- info->ignore,
- TRG_EVENT_UPDATE))
- goto before_trg_err;
-
- bool different_records= (!records_are_comparable(table) ||
- compare_record(table));
- /*
- Default fields must be updated before checking view updateability.
- This branch of INSERT is executed only when a UNIQUE key was violated
- with the ON DUPLICATE KEY UPDATE option. In this case the INSERT
- operation is transformed to an UPDATE, and the default fields must
- be updated as if this is an UPDATE.
- */
- if (different_records && table->default_field)
- table->evaluate_update_default_function();
-
- /* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */
- res= info->table_list->view_check_option(table->in_use, info->ignore);
- if (res == VIEW_CHECK_SKIP)
- goto after_trg_or_ignored_err;
- if (res == VIEW_CHECK_ERROR)
- goto before_trg_err;
-
- table->file->restore_auto_increment(prev_insert_id);
- info->touched++;
- if (different_records)
+ if (table->versioned(VERS_TIMESTAMP))
{
- if (unlikely(error=table->file->ha_update_row(table->record[1],
- table->record[0])) &&
- error != HA_ERR_RECORD_IS_THE_SAME)
+ store_record(table, record[2]);
+ if ((error= vers_insert_history_row(table)))
{
- if (info->ignore &&
- !table->file->is_fatal_error(error, HA_CHECK_ALL))
- {
- if (!(thd->variables.old_behavior &
- OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE))
- table->file->print_error(error, MYF(ME_WARNING));
- goto after_trg_or_ignored_err;
- }
- goto err;
+ info->last_errno= error;
+ table->file->print_error(error, MYF(0));
+ restore_record(table, record[2]);
+ notify_non_trans_table_modified();
+ return 1;
}
-
- if (error != HA_ERR_RECORD_IS_THE_SAME)
- {
- info->updated++;
- if (table->versioned())
- {
- if (table->versioned(VERS_TIMESTAMP))
- {
- store_record(table, record[2]);
- if ((error= vers_insert_history_row(table)))
- {
- info->last_errno= error;
- table->file->print_error(error, MYF(0));
- trg_error= 1;
- restore_record(table, record[2]);
- goto after_trg_or_ignored_err;
- }
- restore_record(table, record[2]);
- }
- info->copied++;
- }
- }
- else
- error= 0;
- /*
- If ON DUP KEY UPDATE updates a row instead of inserting
- one, it's like a regular UPDATE statement: it should not
- affect the value of a next SELECT LAST_INSERT_ID() or
- mysql_insert_id(). Except if LAST_INSERT_ID(#) was in the
- INSERT query, which is handled separately by
- THD::arg_of_last_insert_id_function.
- */
- prev_insert_id_for_cur_row= table->file->insert_id_for_cur_row;
- insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0;
- trg_error= (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
- TRG_ACTION_AFTER, TRUE));
- info->copied++;
- }
-
- /*
- Only update next_insert_id if the AUTO_INCREMENT value was explicitly
- updated, so we don't update next_insert_id with the value from the
- row being updated. Otherwise reset next_insert_id to what it was
- before the duplicate key error, since that value is unused.
- */
- if (table->next_number_field_updated)
- {
- DBUG_ASSERT(table->next_number_field != NULL);
-
- table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int());
- }
- else if (prev_insert_id_for_cur_row)
- {
- table->file->restore_auto_increment(prev_insert_id_for_cur_row);
+ restore_record(table, record[2]);
}
- goto ok;
- }
- else /* DUP_REPLACE */
- {
- auto result= replace_row(table, key_nr, info,
- table->triggers &&
- table->triggers->has_delete_triggers(),
- table->versioned(VERS_TIMESTAMP));
- if (result.before_trg_error)
- goto before_trg_err;
- if (result.error)
- goto err;
- if ((trg_error= result.after_trg_error))
- goto after_trg_or_ignored_err;
- if (result.updated)
- goto after_trg_n_copied_inc;
-
- /* Let us attempt write_row() once more */
+ info->copied++;
}
}
-
- /*
- If more than one iteration of the above while loop is done, from
- the second one the row being inserted will have an explicit
- value in the autoinc field, which was set at the first call of
- handler::update_auto_increment(). This value is saved to avoid
- thd->insert_id_for_cur_row becoming 0. Use this saved autoinc
- value.
- */
- if (table->file->insert_id_for_cur_row == 0)
- table->file->insert_id_for_cur_row= insert_id_for_cur_row;
-
+ else
+ error= 0;
/*
- Restore column maps if they where replaced during an duplicate key
- problem.
+ If ON DUP KEY UPDATE updates a row instead of inserting
+ one, it's like a regular UPDATE statement: it should not
+ affect the value of a next SELECT LAST_INSERT_ID() or
+ mysql_insert_id(). Except if LAST_INSERT_ID(#) was in the
+ INSERT query, which is handled separately by
+ THD::arg_of_last_insert_id_function.
*/
- if (table->read_set != save_read_set ||
- table->write_set != save_write_set)
- table->column_bitmaps_set(save_read_set, save_write_set);
+ prev_insert_id_for_cur_row= table->file->insert_id_for_cur_row;
+ insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0;
+ after_trg_error= table->triggers &&
+ table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER, TRUE);
+ info->copied++;
+ }
+
+ /*
+ Only update next_insert_id if the AUTO_INCREMENT value was explicitly
+ updated, so we don't update next_insert_id with the value from the
+ row being updated. Otherwise reset next_insert_id to what it was
+ before the duplicate key error, since that value is unused.
+ */
+ if (table->next_number_field_updated)
+ {
+ DBUG_ASSERT(table->next_number_field != NULL);
+
+ table->file->adjust_next_insert_id_after_explicit_value(
+ table->next_number_field->val_int());
}
- else if (unlikely((error=table->file->ha_write_row(table->record[0]))))
+ else if (prev_insert_id_for_cur_row)
+ {
+ table->file->restore_auto_increment(prev_insert_id_for_cur_row);
+ }
+
+ after_update();
+ return after_trg_error;
+}
+
+
+int Write_record::single_insert()
+{
+  prev_insert_id= table->file->next_insert_id;
+  save_read_set= table->read_set;
+  save_write_set= table->write_set;
+  info->records++;
+
+ int error= table->file->ha_write_row(table->record[0]);
+ if (unlikely(error))
{
DEBUG_SYNC(thd, "write_row_noreplace");
if (!info->ignore ||
table->file->is_fatal_error(error, HA_CHECK_ALL))
- goto err;
+ return on_error(error);
if (!(thd->variables.old_behavior &
OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE))
table->file->print_error(error, MYF(ME_WARNING));
table->file->restore_auto_increment(prev_insert_id);
- goto after_trg_or_ignored_err;
+ notify_non_trans_table_modified();
+ return 0;
}
+ after_insert();
+ return after_trg_error;
+}
-after_trg_n_copied_inc:
+int Write_record::prepare_handle_duplicate(int error)
+{
+ /*
+ If we do more than one iteration of this loop, from the second one the
+ row will have an explicit value in the autoinc field, which was set at
+ the first call of handler::update_auto_increment(). So we must save
+    the autogenerated value to keep thd->insert_id_for_cur_row from
+    becoming 0.
+ */
+ if (table->file->insert_id_for_cur_row > 0)
+ insert_id_for_cur_row= table->file->insert_id_for_cur_row;
+ else
+ table->file->insert_id_for_cur_row= insert_id_for_cur_row;
+ if (table->file->is_fatal_error(error, HA_CHECK_ALL))
+ return on_error(error);
+ bool is_duplicate_key_error=
+ table->file->is_fatal_error(error, HA_CHECK_ALL & ~HA_CHECK_DUP);
+ if (!is_duplicate_key_error)
+ {
+ /*
+ We come here when we had an ignorable error which is not a duplicate
+      key error. In this case we ignore the error if the ignore flag is
+      set; otherwise we report the error as usual. We will not do any
+      duplicate key processing.
+ */
+ if (info->ignore)
+ {
+ table->file->print_error(error, MYF(ME_WARNING));
+ notify_non_trans_table_modified();
+ return 0; /* Ignoring a not fatal error */
+ }
+ return on_error(error);
+ }
+
+  key_nr= table->file->get_dup_key(error);
+
+  if (unlikely((int) key_nr < 0))
+    return on_error(HA_ERR_FOUND_DUPP_KEY); /* Database can't find key */
+
+ DEBUG_SYNC(thd, "write_row_replace");
+
+ /* Read all columns for the row we are going to replace */
+ table->use_all_columns();
+ /*
+ Don't allow REPLACE to replace a row when an auto_increment column
+ was used. This ensures that we don't get a problem when the
+ whole range of the key has been used.
+ */
+ if (info->handle_duplicates == DUP_REPLACE && table->next_number_field &&
+ key_nr == table->s->next_number_index && insert_id_for_cur_row > 0)
+ return on_error(error);
+
+ error= locate_dup_record();
+ if (error)
+ return on_error(error);
+
+ return 0;
+}
+
+int Write_record::insert_with_duplicates()
+{
+  /*
+    Both update_duplicate() and replace_row() drive their own
+    ha_write_row() retry loop, so this is a plain dispatch on the
+    duplicate handling mode, mirroring the constructor.
+  */
+  if (info->handle_duplicates == DUP_UPDATE)
+    return update_duplicate();
+  /* DUP_REPLACE */
+  return !referenced_by_fk && !has_delete_triggers ?
+         replace_row<true>() : replace_row<false>();
+}
+
+void Write_record::notify_non_trans_table_modified()
+{
+ if (!table->file->has_transactions_and_rollback())
+ thd->transaction->stmt.modified_non_trans_table= TRUE;
+}
+
+void Write_record::after_insert()
+{
+ after_trg_and_copied_inc();
+ send_data();
+ notify_non_trans_table_modified();
+}
+
+void Write_record::after_update()
+{
+ send_data();
+ notify_non_trans_table_modified();
+}
+
+
+void Write_record::after_trg_and_copied_inc()
+{
info->copied++;
thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
- trg_error= (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
- TRG_ACTION_AFTER, TRUE));
+ after_trg_error= (use_triggers &&
+ table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
+ TRG_ACTION_AFTER, TRUE));
+}
+
+
-ok:
+void Write_record::send_data()
+{
/*
We send the row after writing it to the table so that the
correct values are sent to the client. Otherwise it won't show
@@ -2247,28 +2282,11 @@ ok:
values updated in ON DUPLICATE KEY UPDATE.
*/
if (sink && sink->send_data(thd->lex->returning()->item_list) < 0)
- trg_error= 1;
-
-after_trg_or_ignored_err:
- if (key)
- my_safe_afree(key,table->s->max_unique_length);
- if (!table->file->has_transactions_and_rollback())
- thd->transaction->stmt.modified_non_trans_table= TRUE;
- DBUG_RETURN(trg_error);
-
-err:
- info->last_errno= error;
- table->file->print_error(error,MYF(0));
-
-before_trg_err:
- table->file->restore_auto_increment(prev_insert_id);
- if (key)
- my_safe_afree(key, table->s->max_unique_length);
- table->column_bitmaps_set(save_read_set, save_write_set);
- DBUG_RETURN(1);
+ after_trg_error= 1;
}
+
/******************************************************************************
Check that there aren't any null_fields
******************************************************************************/
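The On_exit helper gives update_duplicate() a scope guard: with it,
table->default_column_bitmaps() runs on every return path, including the
early returns through restore_on_error(). A self-contained sketch of the
idiom, assuming only the standard library (the guard must own its callable by
value, since the lambda it is built from is a temporary that dies at the end
of the declaration):

    #include <cstdio>
    #include <utility>

    template<class F>
    class On_exit
    {
      F func;                  // owned copy; a reference would dangle
    public:
      explicit On_exit(F f): func(std::move(f)) {}
      ~On_exit() { func(); }   // runs on every exit path of the scope
    };

    template<class F>
    static On_exit<F> on_exit(F f) { return On_exit<F>(std::move(f)); }

    static int work(bool fail)
    {
      auto cleanup= on_exit([]{ std::puts("bitmaps restored"); });
      if (fail)
        return 1;              // guard still fires here
      return 0;                // ...and here
    }

    int main() { work(false); work(true); return 0; }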
diff --git a/sql/sql_insert.h b/sql/sql_insert.h
index c70770e1ef3..062bb834026 100644
--- a/sql/sql_insert.h
+++ b/sql/sql_insert.h
@@ -24,7 +24,7 @@ typedef List<Item> List_item;
typedef struct st_copy_info COPY_INFO;
-struct replace_execution_result
-{ int error; bool updated, before_trg_error, after_trg_error; };
int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
List<Item> &fields, List_item *values,
@@ -43,16 +43,125 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
int vers_insert_history_row(TABLE *table);
int check_duplic_insert_without_overlaps(THD *thd, TABLE *table,
enum_duplicates duplic);
-int locate_dup_record(THD *thd, TABLE *table, uchar *&key, uint keynum);
-replace_execution_result replace_row(TABLE *table, uint key_nr,
- COPY_INFO *info, bool use_triggers,
- bool versioned);
int write_record(THD *thd, TABLE *table, COPY_INFO *info,
select_result *returning= NULL);
void kill_delayed_threads(void);
bool binlog_create_table(THD *thd, TABLE *table, bool replace);
bool binlog_drop_table(THD *thd, TABLE *table);
+
+class Write_record
+{
+ THD *thd;
+ TABLE *table;
+ COPY_INFO *info;
+ uint key_nr;
+ select_result *sink;
+  uchar *key= NULL;
+
+ MY_BITMAP *save_read_set, *save_write_set;
+ ulonglong prev_insert_id;
+ ulonglong insert_id_for_cur_row= 0;
+ const uint last_unique_key;
+ const bool use_triggers;
+ const bool versioned;
+ const bool has_delete_triggers;
+ const bool referenced_by_fk;
+ static uint get_last_unique_key(TABLE *table);
+ // FINALIZATION
+ void notify_non_trans_table_modified();
+ void after_insert();
+ void after_update();
+ void after_trg_and_copied_inc();
+ void send_data();
+ int on_error(int error)
+ {
+ info->last_errno= error;
+ table->file->print_error(error,MYF(0));
+ return restore_on_error();
+ }
+ int restore_on_error()
+ {
+ table->file->restore_auto_increment(prev_insert_id);
+ table->column_bitmaps_set(save_read_set, save_write_set);
+ return 1;
+ }
+ int after_trg_error= 0;
+protected:
+ int prepare_handle_duplicate(int error);
+ int locate_dup_record();
+
+
+ template<bool can_optimize>
+ int replace_row();
+ int update_duplicate();
+
+ int single_insert();
+ int insert_with_duplicates();
+public:
+
+  /**
+    @param thd          thread context
+    @param table        table to which the record should be written
+    @param info         COPY_INFO structure describing handling of
+                        duplicates and used for counting the number of
+                        records inserted and deleted
+    @param versioned    whether replaced rows should be preserved as
+                        history rows (system-versioned tables)
+    @param use_triggers whether the table's triggers should be invoked
+    @param sink         result sink for the RETURNING clause
+  */
+ Write_record(THD *thd, TABLE *table, COPY_INFO *info,
+ bool versioned, bool use_triggers, select_result *sink):
+ thd(thd), table(table), info(info), sink(sink),
+ last_unique_key(get_last_unique_key(table)),
+ use_triggers(use_triggers), versioned(versioned),
+ has_delete_triggers(use_triggers &&
+ table->triggers->has_delete_triggers()),
+ referenced_by_fk(table->file->referenced_by_foreign_key())
+ {
+    switch (info->handle_duplicates)
+    {
+    case DUP_ERROR:
+      write_record= &Write_record::single_insert;
+      break;
+    case DUP_UPDATE:
+      write_record= &Write_record::update_duplicate;
+      break;
+    case DUP_REPLACE:
+      write_record= !referenced_by_fk && !has_delete_triggers ?
+                    &Write_record::replace_row<true> :
+                    &Write_record::replace_row<false>;
+      break;
+    }
+  }
+  ~Write_record()
+  {
+    if (key)
+      my_safe_afree(key, table->s->max_unique_length);
+  }
+  /**
+    Write a record to the table, optionally deleting conflicting records
+    and invoking the proper triggers.
+
+    Points to single_insert(), update_duplicate() or replace_row<>(),
+    chosen by the constructor according to info->handle_duplicates.
+
+    NOTE
+      Once the record is written, the after-insert trigger is invoked. If
+      instead of inserting a new record we update an old one, both
+      on-update triggers fire instead. Similarly, both on-delete triggers
+      are invoked if conflicting records are deleted.
+
+      Sets thd->transaction.stmt.modified_non_trans_table to TRUE if the
+      updated table is not transactional.
+
+    RETURN VALUE
+      0       success
+      non-0   error
+  */
+ int (Write_record::*write_record)();
+};
#ifdef EMBEDDED_LIBRARY
inline void kill_delayed_threads(void) {}
#endif
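For reference, a toy model of the key-ordering assumption behind
get_last_unique_key() and the replace_row<true> dispatch above (a standalone
sketch, not MariaDB code): engines report duplicate-key conflicts in key
order unless they set HA_DUPLICATE_KEY_NOT_IN_ORDER, so a conflict on the
last unique key implies every earlier unique key was already checked, and the
offending row can be replaced with a single ha_update_row() instead of
delete + re-insert.

    #include <cstdio>
    #include <vector>

    struct Key { bool unique; };

    // Mirrors get_last_unique_key(): highest key index flagged unique,
    // or ~0U when there is none or conflict order is not guaranteed.
    static unsigned last_unique_key(const std::vector<Key> &keys,
                                    bool dup_not_in_order)
    {
      if (dup_not_in_order)
        return ~0U;
      for (int i= (int) keys.size() - 1; i >= 0; --i)
        if (keys[i].unique)
          return (unsigned) i;
      return ~0U;
    }

    int main()
    {
      // 0=PRIMARY (unique), 1=k1 (non-unique), 2=k2 (unique)
      std::vector<Key> keys{ {true}, {false}, {true} };
      std::printf("last unique key: %u\n", last_unique_key(keys, false)); // 2
      return 0;
    }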