author | Kentoku <kentokushiba@gmail.com> | 2019-11-29 08:22:13 +0900 |
---|---|---|
committer | Kentoku <kentokushiba@gmail.com> | 2019-11-29 23:23:57 +0900 |
commit | e066723a4149b05f212850dcf8ecf55b3ce2524d (patch) | |
tree | f790ac3d0bff3938429eefb8c20900e310fcec40 /sql/ha_partition.cc | |
parent | 3826178da89f987ebf641bcd92d4a714d51b3ecb (diff) | |
download | mariadb-git-bb-10.4-MDEV-18973_2.tar.gz | |
MDEV-18973 CLIENT_FOUND_ROWS wrong in spider (bb-10.4-MDEV-18973_2)
Get count from last_used_con->info
Contributed by willhan at Tencent Games
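
For context on the bug being fixed: CLIENT_FOUND_ROWS is a connect-time flag that changes the affected-rows count an UPDATE reports to the client from "rows actually changed" to "rows matched by the WHERE clause". MDEV-18973 is about Spider reporting the wrong value when that flag is set. The sketch below uses the MySQL C API with hypothetical connection parameters and a hypothetical table t1; it only illustrates the client-visible behaviour the fix is meant to restore:

```cpp
#include <mysql.h>
#include <cstdio>

int main()
{
  // Hypothetical host/user/password/database and table t1; only the
  // CLIENT_FOUND_ROWS flag and mysql_affected_rows() matter here.
  MYSQL *conn= mysql_init(NULL);
  if (!mysql_real_connect(conn, "127.0.0.1", "user", "password", "test",
                          3306, NULL, CLIENT_FOUND_ROWS))
  {
    std::fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    return 1;
  }

  // An UPDATE that matches rows but changes nothing: with CLIENT_FOUND_ROWS
  // the reported count is the number of rows *found* (matched), not the
  // number changed, so it must be non-zero whenever the WHERE clause matched.
  if (mysql_query(conn, "UPDATE t1 SET c1 = c1 WHERE c1 IS NOT NULL") == 0)
    std::printf("affected (found) rows: %llu\n",
                (unsigned long long) mysql_affected_rows(conn));

  mysql_close(conn);
  return 0;
}
```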
Diffstat (limited to 'sql/ha_partition.cc')
-rw-r--r-- | sql/ha_partition.cc | 51 |
1 file changed, 48 insertions, 3 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 8dd808065ce..d4d411f1ad3 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4822,6 +4822,42 @@ ha_rows ha_partition::guess_bulk_insert_rows()
 }


+void ha_partition::sum_copy_info(handler *file)
+{
+  copy_info.records+= file->copy_info.records;
+  copy_info.touched+= file->copy_info.touched;
+  copy_info.copied+= file->copy_info.copied;
+  copy_info.deleted+= file->copy_info.deleted;
+  copy_info.updated+= file->copy_info.updated;
+}
+
+
+void ha_partition::sum_copy_infos()
+{
+  handler **file_array;
+  bzero(&copy_info, sizeof(copy_info));
+  file_array= m_file;
+  do
+  {
+    if (bitmap_is_set(&(m_opened_partitions), (uint)(file_array - m_file)))
+      sum_copy_info(*file_array);
+  } while (*(++file_array));
+}
+
+void ha_partition::reset_copy_info()
+{
+  handler **file_array;
+  bzero(&copy_info, sizeof(copy_info));
+  file_array= m_file;
+  do
+  {
+    if (bitmap_is_set(&(m_opened_partitions), (uint)(file_array - m_file)))
+      bzero(&(*file_array)->copy_info, sizeof(copy_info));
+  } while (*(++file_array));
+}
+
+
+
 /*
   Finish a large batch of insert rows
@@ -4853,6 +4889,7 @@ int ha_partition::end_bulk_insert()
     int tmp;
     if ((tmp= m_file[i]->ha_end_bulk_insert()))
       error= tmp;
+    sum_copy_info(m_file[i]);
   }
   bitmap_clear_all(&m_bulk_insert_started);
   DBUG_RETURN(error);
@@ -11164,6 +11201,7 @@ bool ha_partition::start_bulk_update()

   do
   {
+    bzero(&(*file)->copy_info, sizeof((*file)->copy_info));
     if ((*file)->start_bulk_update())
       DBUG_RETURN(TRUE);
   } while (*(++file));
@@ -11221,6 +11259,7 @@ int ha_partition::end_bulk_update()
     if ((tmp= (*file)->end_bulk_update()))
       error= tmp;
   } while (*(++file));
+  sum_copy_infos();
   DBUG_RETURN(error);
 }
@@ -11317,6 +11356,7 @@ int ha_partition::end_bulk_delete()
     if ((tmp= (*file)->end_bulk_delete()))
       error= tmp;
   } while (*(++file));
+  sum_copy_infos();
   DBUG_RETURN(error);
 }
@@ -11433,11 +11473,13 @@ int ha_partition::pre_direct_update_rows_init(List<Item> *update_fields)
     0                   Success
 */

-int ha_partition::direct_update_rows(ha_rows *update_rows_result)
+int ha_partition::direct_update_rows(ha_rows *update_rows_result,
+                                     ha_rows *found_rows_result)
 {
   int error;
   bool rnd_seq= FALSE;
   ha_rows update_rows= 0;
+  ha_rows found_rows= 0;
   uint32 i;
   DBUG_ENTER("ha_partition::direct_update_rows");
@@ -11449,6 +11491,7 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result)
   }

   *update_rows_result= 0;
+  *found_rows_result= 0;
   for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
   {
     handler *file= m_file[i];
@@ -11464,7 +11507,8 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result)
     }
     if (unlikely((error= (m_pre_calling ?
                           (file)->pre_direct_update_rows() :
-                          (file)->ha_direct_update_rows(&update_rows)))))
+                          (file)->ha_direct_update_rows(&update_rows,
+                                                        &found_rows)))))
     {
       if (rnd_seq)
       {
@@ -11476,6 +11520,7 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result)
       DBUG_RETURN(error);
     }
     *update_rows_result+= update_rows;
+    *found_rows_result+= found_rows;
   }
   if (rnd_seq)
   {
@@ -11511,7 +11556,7 @@ int ha_partition::pre_direct_update_rows()
   DBUG_ENTER("ha_partition::pre_direct_update_rows");
   save_m_pre_calling= m_pre_calling;
   m_pre_calling= TRUE;
-  error= direct_update_rows(&not_used);
+  error= direct_update_rows(&not_used, &not_used);
   m_pre_calling= save_m_pre_calling;
   DBUG_RETURN(error);
 }
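
Beyond the Spider-side change described in the commit message, the ha_partition part of the patch widens direct_update_rows() to return a found-rows total next to the updated-rows total, summing both across the opened partitions, and aggregates each partition's copy_info counters when bulk operations end. The stand-alone sketch below models only that aggregation step; PartitionResult and sum_direct_update are simplified stand-ins for the server's handler machinery, not actual MariaDB types:

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-in: in the server, ha_rows is an unsigned row count.
typedef unsigned long long ha_rows;

// Per-partition result of a direct UPDATE (illustrative type).
struct PartitionResult
{
  ha_rows updated;   // rows actually changed in this partition
  ha_rows found;     // rows matched by the WHERE clause in this partition
};

// Mirrors the aggregation the patch adds to ha_partition::direct_update_rows():
// each partition reports its own counts and the partition handler sums them
// into two out-parameters.
static void sum_direct_update(const PartitionResult *parts, std::size_t n_parts,
                              ha_rows *update_rows_result,
                              ha_rows *found_rows_result)
{
  *update_rows_result= 0;
  *found_rows_result= 0;
  for (std::size_t i= 0; i < n_parts; i++)
  {
    *update_rows_result+= parts[i].updated;
    *found_rows_result+= parts[i].found;
  }
}

int main()
{
  PartitionResult parts[]= { {2, 5}, {0, 3}, {1, 1} };
  ha_rows updated, found;
  sum_direct_update(parts, 3, &updated, &found);
  // With CLIENT_FOUND_ROWS the client should see 'found' (9), otherwise 'updated' (3).
  std::printf("updated=%llu found=%llu\n", updated, found);
  return 0;
}
```

Higher layers can then report whichever total matches the client's CLIENT_FOUND_ROWS setting.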