-rw-r--r--  mysql-test/suite/archive/archive_eits.result | 24
-rw-r--r--  mysql-test/suite/archive/archive_eits.test   | 32
-rw-r--r--  storage/archive/ha_archive.cc                | 50
-rw-r--r--  storage/archive/ha_archive.h                 |  3
4 files changed, 92 insertions, 17 deletions
diff --git a/mysql-test/suite/archive/archive_eits.result b/mysql-test/suite/archive/archive_eits.result
new file mode 100644
index 00000000000..e077c2e4954
--- /dev/null
+++ b/mysql-test/suite/archive/archive_eits.result
@@ -0,0 +1,24 @@
+drop table if exists t1;
+#
+# MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command
+#
+CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT)engine=archive;
+INSERT INTO t1 VALUES
+(101, PointFromText('POINT(10 10)')),
+(102, PointFromText('POINT(20 10)')),
+(103, PointFromText('POINT(20 20)')),
+(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+set @tmp1= @@optimizer_use_condition_selectivity;
+set @tmp2= @@use_stat_tables;
+set optimizer_use_condition_selectivity=4;
+set use_stat_tables=PREFERABLY;
+ANALYZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	Engine-independent statistics collected
+test.t1	analyze	note	The storage engine for the table doesn't support analyze
+select * from mysql.table_stats where table_name='t1' and db_name=database();
+db_name	table_name	cardinality
+test	t1	4
+drop table t1;
+set optimizer_use_condition_selectivity=@tmp1;
+set use_stat_tables=@tmp2;
diff --git a/mysql-test/suite/archive/archive_eits.test b/mysql-test/suite/archive/archive_eits.test
new file mode 100644
index 00000000000..04c4ccdb709
--- /dev/null
+++ b/mysql-test/suite/archive/archive_eits.test
@@ -0,0 +1,32 @@
+-- source include/have_archive.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+--echo #
+--echo # MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command
+--echo #
+
+CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT)engine=archive;
+INSERT INTO t1 VALUES
+(101, PointFromText('POINT(10 10)')),
+(102, PointFromText('POINT(20 10)')),
+(103, PointFromText('POINT(20 20)')),
+(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+
+set @tmp1= @@optimizer_use_condition_selectivity;
+set @tmp2= @@use_stat_tables;
+
+set optimizer_use_condition_selectivity=4;
+set use_stat_tables=PREFERABLY;
+ANALYZE TABLE t1;
+
+select * from mysql.table_stats where table_name='t1' and db_name=database();
+
+drop table t1;
+
+set optimizer_use_condition_selectivity=@tmp1;
+set use_stat_tables=@tmp2;
+
+
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index fd0a6b7d4fb..d14a3030a5d 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1652,7 +1652,6 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
   DBUG_VOID_RETURN;
 }
 
-
 /*
   Hints for optimizer, see ha_tina for more information
 */
@@ -1660,22 +1659,7 @@ int ha_archive::info(uint flag)
 {
   DBUG_ENTER("ha_archive::info");
 
-  mysql_mutex_lock(&share->mutex);
-  if (share->dirty)
-  {
-    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
-    DBUG_ASSERT(share->archive_write_open);
-    azflush(&(share->archive_write), Z_SYNC_FLUSH);
-    share->dirty= FALSE;
-  }
-
-  /*
-    This should be an accurate number now, though bulk and delayed inserts can
-    cause the number to be inaccurate.
-  */
-  stats.records= share->rows_recorded;
-  mysql_mutex_unlock(&share->mutex);
-
+  flush_and_clear_pending_writes();
   stats.deleted= 0;
 
   DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
@@ -1718,6 +1702,38 @@ int ha_archive::info(uint flag)
 }
 
 
+int ha_archive::external_lock(THD *thd, int lock_type)
+{
+  if (lock_type == F_RDLCK)
+  {
+    // We are going to read from the table. Flush any pending writes that we
+    // may have
+    flush_and_clear_pending_writes();
+  }
+  return 0;
+}
+
+
+void ha_archive::flush_and_clear_pending_writes()
+{
+  mysql_mutex_lock(&share->mutex);
+  if (share->dirty)
+  {
+    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
+    DBUG_ASSERT(share->archive_write_open);
+    azflush(&(share->archive_write), Z_SYNC_FLUSH);
+    share->dirty= FALSE;
+  }
+
+  /*
+    This should be an accurate number now, though bulk and delayed inserts can
+    cause the number to be inaccurate.
+  */
+  stats.records= share->rows_recorded;
+  mysql_mutex_unlock(&share->mutex);
+}
+
+
 /*
   This method tells us that a bulk insert operation is about to occur. We set
   a flag which will keep write_row from saying that its data is dirty. This in
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 56ff566db8c..a74374a340f 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -169,5 +169,8 @@ public:
   int unpack_row(azio_stream *file_to_read, uchar *record);
   unsigned int pack_row(uchar *record, azio_stream *writer);
   bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+  int external_lock(THD *thd, int lock_type);
+private:
+  void flush_and_clear_pending_writes();
 };
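
For context, the user-visible effect of the patch can be checked with the SQL below, a minimal sketch distilled from the new archive_eits.test above (it assumes a MariaDB server with the Archive engine and the mysql.table_stats statistics table available; exact output may differ between versions). Before this change, rows still sitting in the Archive engine's unflushed write buffer were not seen when ANALYZE collected engine-independent statistics, so the recorded cardinality could be 0 (MDEV-17297); with external_lock() now flushing pending writes before reads, ANALYZE records all 4 rows:

CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT) ENGINE=Archive;
INSERT INTO t1 VALUES
(101, PointFromText('POINT(10 10)')),
(102, PointFromText('POINT(20 10)')),
(103, PointFromText('POINT(20 20)')),
(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
SET use_stat_tables= PREFERABLY;   -- collect engine-independent statistics
ANALYZE TABLE t1;
-- expected with this patch: cardinality 4 (previously reported as 0, per MDEV-17297)
SELECT db_name, table_name, cardinality
FROM mysql.table_stats
WHERE table_name='t1' AND db_name=database();
DROP TABLE t1;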