diff options
author | Kentoku <kentokushiba@gmail.com> | 2019-11-08 10:27:07 +0900 |
---|---|---|
committer | Kentoku <kentokushiba@gmail.com> | 2019-11-09 05:57:16 +0900 |
commit | 0cf2ebd18acd945b04111133ccbb479cbc81d405 (patch) | |
tree | 769a262ef5322079f594cd6665aede8b065221e4 /sql | |
parent | c24ec3cece6d8bf70dac7519b6fd397c464f7a82 (diff) | |
download | mariadb-git-bb-10.4-MDEV-18973.tar.gz |
MDEV-18973 CLIENT_FOUND_ROWS wrong in spider (branch: bb-10.4-MDEV-18973)
Get count from last_used_con->info
Contributed by willhan at Tencent Games
Diffstat (limited to 'sql')
-rw-r--r-- | sql/ha_partition.cc | 35 | ||||
-rw-r--r-- | sql/ha_partition.h | 4 | ||||
-rw-r--r-- | sql/handler.cc | 15 | ||||
-rw-r--r-- | sql/handler.h | 25 | ||||
-rw-r--r-- | sql/sql_insert.cc | 54 | ||||
-rw-r--r-- | sql/sql_load.cc | 2 | ||||
-rw-r--r-- | sql/sql_update.cc | 14 |
7 files changed, 129 insertions, 20 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index baf8f88c00c..256a9fff69d 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4270,6 +4270,13 @@ void ha_partition::try_semi_consistent_read(bool yes) int ha_partition::write_row(const uchar * buf) { + DBUG_ENTER("ha_partition::write_row"); + DBUG_RETURN(write_row_ext(buf, NULL)); +} + + +int ha_partition::write_row_ext(const uchar * buf, COPY_INFO *info) +{ uint32 part_id; int error; longlong func_value; @@ -4278,7 +4285,7 @@ int ha_partition::write_row(const uchar * buf) THD *thd= ha_thd(); sql_mode_t saved_sql_mode= thd->variables.sql_mode; bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null; - DBUG_ENTER("ha_partition::write_row"); + DBUG_ENTER("ha_partition::write_row_ext"); DBUG_PRINT("enter", ("partition this: %p", this)); /* @@ -4336,7 +4343,8 @@ int ha_partition::write_row(const uchar * buf) start_part_bulk_insert(thd, part_id); tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ - error= m_file[part_id]->ha_write_row(buf); + error= info ? m_file[part_id]->ha_write_row_ext(buf, info) : + m_file[part_id]->ha_write_row(buf); if (have_auto_increment && !table->s->next_number_keypart) set_auto_increment_if_higher(table->next_number_field); reenable_binlog(thd); @@ -4839,9 +4847,16 @@ ha_rows ha_partition::guess_bulk_insert_rows() int ha_partition::end_bulk_insert() { + DBUG_ENTER("ha_partition::end_bulk_insert"); + DBUG_RETURN(end_bulk_insert_ext(NULL)); +} + + +int ha_partition::end_bulk_insert_ext(COPY_INFO *info) +{ int error= 0; uint i; - DBUG_ENTER("ha_partition::end_bulk_insert"); + DBUG_ENTER("ha_partition::end_bulk_insert_ext"); if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) DBUG_RETURN(error); @@ -4851,7 +4866,8 @@ int ha_partition::end_bulk_insert() i= bitmap_get_next_set(&m_bulk_insert_started, i)) { int tmp; - if ((tmp= m_file[i]->ha_end_bulk_insert())) + if ((tmp= info ? 
m_file[i]->ha_end_bulk_insert_ext(info) : + m_file[i]->ha_end_bulk_insert())) error= tmp; } bitmap_clear_all(&m_bulk_insert_started); @@ -11420,11 +11436,13 @@ int ha_partition::pre_direct_update_rows_init(List<Item> *update_fields) 0 Success */ -int ha_partition::direct_update_rows(ha_rows *update_rows_result) +int ha_partition::direct_update_rows(ha_rows *update_rows_result, + ha_rows *found_rows_result) { int error; bool rnd_seq= FALSE; ha_rows update_rows= 0; + ha_rows found_rows= 0; uint32 i; DBUG_ENTER("ha_partition::direct_update_rows"); @@ -11436,6 +11454,7 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result) } *update_rows_result= 0; + *found_rows_result= 0; for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) { handler *file= m_file[i]; @@ -11451,7 +11470,8 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result) } if (unlikely((error= (m_pre_calling ? (file)->pre_direct_update_rows() : - (file)->ha_direct_update_rows(&update_rows))))) + (file)->ha_direct_update_rows(&update_rows, + &found_rows))))) { if (rnd_seq) { @@ -11463,6 +11483,7 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result) DBUG_RETURN(error); } *update_rows_result+= update_rows; + *found_rows_result+= found_rows; } if (rnd_seq) { @@ -11498,7 +11519,7 @@ int ha_partition::pre_direct_update_rows() DBUG_ENTER("ha_partition::pre_direct_update_rows"); save_m_pre_calling= m_pre_calling; m_pre_calling= TRUE; - error= direct_update_rows(¬_used); + error= direct_update_rows(¬_used, ¬_used); m_pre_calling= save_m_pre_calling; DBUG_RETURN(error); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 598c63837c7..fea4d284e06 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -649,6 +649,7 @@ public: number of calls to write_row. 
*/ virtual int write_row(const uchar * buf); + virtual int write_row_ext(const uchar * buf, COPY_INFO *info); virtual bool start_bulk_update(); virtual int exec_bulk_update(ha_rows *dup_key_found); virtual int end_bulk_update(); @@ -657,7 +658,7 @@ public: virtual int update_row(const uchar * old_data, const uchar * new_data); virtual int direct_update_rows_init(List<Item> *update_fields); virtual int pre_direct_update_rows_init(List<Item> *update_fields); - virtual int direct_update_rows(ha_rows *update_rows); + virtual int direct_update_rows(ha_rows *update_rows, ha_rows *found_rows); virtual int pre_direct_update_rows(); virtual bool start_bulk_delete(); virtual int end_bulk_delete(); @@ -670,6 +671,7 @@ public: virtual int truncate(); virtual void start_bulk_insert(ha_rows rows, uint flags); virtual int end_bulk_insert(); + virtual int end_bulk_insert_ext(COPY_INFO *info); private: ha_rows guess_bulk_insert_rows(); void start_part_bulk_insert(THD *thd, uint part_id); diff --git a/sql/handler.cc b/sql/handler.cc index 72b11098060..cb7ac02a1ea 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6652,11 +6652,18 @@ static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar * int handler::ha_write_row(const uchar *buf) { + DBUG_ENTER("handler::ha_write_row"); + DBUG_RETURN(ha_write_row_ext(buf, NULL)); +} + + +int handler::ha_write_row_ext(const uchar *buf, COPY_INFO *info) +{ int error; Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type == F_WRLCK); - DBUG_ENTER("handler::ha_write_row"); + DBUG_ENTER("handler::ha_write_row_ext"); DEBUG_SYNC_C("ha_write_row_start"); MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str); @@ -6672,7 +6679,7 @@ int handler::ha_write_row(const uchar *buf) DBUG_RETURN(error); } TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_WRITE_ROW, MAX_KEY, 0, - { error= write_row(buf); }) + { error= info ? 
write_row_ext(buf, info) : write_row(buf); }) MYSQL_INSERT_ROW_DONE(error); if (likely(!error) && !row_already_logged) @@ -6812,14 +6819,14 @@ int handler::ha_delete_row(const uchar *buf) @retval != 0 Failure. */ -int handler::ha_direct_update_rows(ha_rows *update_rows) +int handler::ha_direct_update_rows(ha_rows *update_rows, ha_rows *found_rows) { int error; MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str); mark_trx_read_write(); - error = direct_update_rows(update_rows); + error = direct_update_rows(update_rows, found_rows); MYSQL_UPDATE_ROW_DONE(error); return error; } diff --git a/sql/handler.h b/sql/handler.h index 2d25568488b..07ef73b9b27 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -2927,6 +2927,9 @@ public: }; +typedef struct st_copy_info COPY_INFO; + + /** The handler class is the interface for dynamically loadable storage engines. Do not add ifdefs and take care when adding or @@ -3248,6 +3251,7 @@ public: */ int ha_external_lock(THD *thd, int lock_type); int ha_write_row(const uchar * buf); + int ha_write_row_ext(const uchar * buf, COPY_INFO *info); int ha_update_row(const uchar * old_data, const uchar * new_data); int ha_delete_row(const uchar * buf); void ha_release_auto_increment(); @@ -3286,6 +3290,13 @@ public: int ret= end_bulk_insert(); DBUG_RETURN(ret); } + int ha_end_bulk_insert_ext(COPY_INFO *info) + { + DBUG_ENTER("handler::ha_end_bulk_insert_ext"); + estimation_rows_to_insert= 0; + int ret= end_bulk_insert_ext(info); + DBUG_RETURN(ret); + } int ha_bulk_update_row(const uchar *old_data, const uchar *new_data, ha_rows *dup_key_found); int ha_delete_all_rows(); @@ -4571,6 +4582,11 @@ private: { return HA_ERR_WRONG_COMMAND; } + virtual int write_row_ext(const uchar *buf, + COPY_INFO *info __attribute__((unused))) + { + return write_row(buf); + } /** Update a single row. 
@@ -4599,7 +4615,7 @@ private: /* Perform initialization for a direct update request */ public: - int ha_direct_update_rows(ha_rows *update_rows); + int ha_direct_update_rows(ha_rows *update_rows, ha_rows *found_rows); virtual int direct_update_rows_init(List<Item> *update_fields) { return HA_ERR_WRONG_COMMAND; @@ -4609,7 +4625,8 @@ private: { return HA_ERR_WRONG_COMMAND; } - virtual int direct_update_rows(ha_rows *update_rows __attribute__((unused))) + virtual int direct_update_rows(ha_rows *update_rows __attribute__((unused)), + ha_rows *found_rows __attribute__((unused))) { return HA_ERR_WRONG_COMMAND; } @@ -4692,6 +4709,10 @@ private: } virtual void start_bulk_insert(ha_rows rows, uint flags) {} virtual int end_bulk_insert() { return 0; } + virtual int end_bulk_insert_ext(COPY_INFO *info) + { + return end_bulk_insert(); + } protected: virtual int index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 3ad3cf9151f..7c807e4d454 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1107,7 +1107,8 @@ values_loop_end: auto_inc values from the delayed_insert thread as they share TABLE. 
*/ table->file->ha_release_auto_increment(); - if (using_bulk_insert && unlikely(table->file->ha_end_bulk_insert()) && + if (using_bulk_insert && + unlikely(table->file->ha_end_bulk_insert_ext(&info)) && !error) { table->file->print_error(my_errno,MYF(0)); @@ -1234,6 +1235,9 @@ values_loop_end: retval= thd->lex->explain->send_explain(thd); goto abort; } + DBUG_PRINT("info", ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info.touched, (ulonglong) info.copied, + (ulonglong) info.updated, (ulonglong) info.deleted)); if ((iteration * values_list.elements) == 1 && (!(thd->variables.option_bits & OPTION_WARNINGS) || !thd->cuted_fields)) { @@ -1709,7 +1713,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (info->handle_duplicates == DUP_REPLACE || info->handle_duplicates == DUP_UPDATE) { - while (unlikely(error=table->file->ha_write_row(table->record[0]))) + while (unlikely(error=table->file->ha_write_row_ext(table->record[0], + info))) { uint key_nr; /* @@ -1850,6 +1855,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->file->restore_auto_increment(); info->touched++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); if (different_records) { if (unlikely(error=table->file->ha_update_row(table->record[1], @@ -1870,6 +1879,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (error != HA_ERR_RECORD_IS_THE_SAME) { info->updated++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); if (table->versioned()) { if (table->versioned(VERS_TIMESTAMP)) @@ -1886,10 +1899,18 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) restore_record(table, record[2]); } info->copied++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu 
updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, + (ulonglong) info->deleted)); } } else + { + DBUG_PRINT("info", ("HA_ERR_RECORD_IS_THE_SAME")); error= 0; + } /* If ON DUP KEY UPDATE updates a row instead of inserting one, it's like a regular UPDATE statement: it should not @@ -1904,6 +1925,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)); info->copied++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); } /* @@ -1959,6 +1984,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (likely(!error)) { info->deleted++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); if (table->versioned(VERS_TIMESTAMP)) { store_record(table, record[2]); @@ -1969,7 +1998,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } } else + { + DBUG_PRINT("info", ("HA_ERR_RECORD_IS_THE_SAME")); error= 0; // error was HA_ERR_RECORD_IS_THE_SAME + } /* Since we pretend that we have done insert we should call its after triggers. 
@@ -1997,9 +2029,21 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (unlikely(error)) goto err; if (!table->versioned(VERS_TIMESTAMP)) + { info->deleted++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); + } else + { info->updated++; + DBUG_PRINT("info", + ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); + } if (!table->file->has_transactions()) thd->transaction.stmt.modified_non_trans_table= TRUE; if (table->triggers && @@ -2033,7 +2077,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->write_set != save_write_set) table->column_bitmaps_set(save_read_set, save_write_set); } - else if (unlikely((error=table->file->ha_write_row(table->record[0])))) + else if (unlikely((error=table->file->ha_write_row_ext(table->record[0], + info)))) { DEBUG_SYNC(thd, "write_row_noreplace"); if (!info->ignore || @@ -2048,6 +2093,9 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) after_trg_n_copied_inc: info->copied++; + DBUG_PRINT("info", ("touched:%llu copied:%llu updated:%llu deleted:%llu", + (ulonglong) info->touched, (ulonglong) info->copied, + (ulonglong) info->updated, (ulonglong) info->deleted)); thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); trg_error= (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_INSERT, diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 89cc3f8da64..98f895af8be 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -677,7 +677,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, if (likely(!error)) thd_progress_next_stage(thd); if (thd->locked_tables_mode <= LTM_LOCK_TABLES && - table->file->ha_end_bulk_insert() && !error) + table->file->ha_end_bulk_insert_ext(&info) 
&& !error) { table->file->print_error(my_errno, MYF(0)); error= 1; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index f5bb298fdba..cfe3ce1e4f3 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -718,6 +718,11 @@ int mysql_update(THD *thd, Later we also ensure that we are only using one table (no sub queries) */ + DBUG_PRINT("info", ("HA_CAN_DIRECT_UPDATE_AND_DELETE: %s", (table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) ? "TRUE" : "FALSE")); + DBUG_PRINT("info", ("using_io_buffer: %s", query_plan.using_io_buffer ? "TRUE" : "FALSE")); + DBUG_PRINT("info", ("ignore: %s", ignore ? "TRUE" : "FALSE")); + DBUG_PRINT("info", ("virtual_columns_marked_for_read: %s", table->check_virtual_columns_marked_for_read() ? "TRUE" : "FALSE")); + DBUG_PRINT("info", ("virtual_columns_marked_for_write: %s", table->check_virtual_columns_marked_for_write() ? "TRUE" : "FALSE")); if ((table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) && !has_triggers && !binlog_is_row && !query_plan.using_io_buffer && !ignore && @@ -928,11 +933,16 @@ update_begin: if (do_direct_update) { /* Direct updating is supported */ + ha_rows update_rows= 0, found_rows= 0; DBUG_PRINT("info", ("Using direct update")); table->reset_default_fields(); - if (unlikely(!(error= table->file->ha_direct_update_rows(&updated)))) + if (unlikely(!(error= table->file->ha_direct_update_rows(&update_rows, + &found_rows)))) error= -1; - found= updated; + updated= update_rows; + found= found_rows; + if (found < updated) + found= updated; goto update_end; } |