author     Michael Widenius <monty@askmonty.org>    2011-06-27 19:07:24 +0300
committer  Michael Widenius <monty@askmonty.org>    2011-06-27 19:07:24 +0300
commit     ba9a890f0c389a93925c8c21a30e64801f397f18 (patch)
tree       30b4ae8dda481d0c057641fef6eb1edea8b643ec
parent     a6542a13abcb0e5632919b30a7bd343757fbccba (diff)
download   mariadb-git-ba9a890f0c389a93925c8c21a30e64801f397f18.tar.gz
New status variables: Rows_tmp_read, Handler_tmp_update and Handler_tmp_write
Split the status variable Rows_read into Rows_read and Rows_tmp_read so that one can see how much real data is read. The same was done with Handler_update and Handler_write.
Fixed a bug in MEMORY tables where some variables were counted twice.
Added a new internal handler call 'ha_close()' to have one place to gather statistics.
Fixed a bug where thd->open_options was set to the wrong value when doing admin_recreate_table().

mysql-test/r/status.result:
  Updated test results and added new tests
mysql-test/r/status_user.result:
  Updated test results
mysql-test/t/status.test:
  Added new test for temporary table status variables
sql/ha_partition.cc:
  Changed to call ha_close() instead of close()
sql/handler.cc:
  Added internal_tmp_table variable for easy checking of temporary tables.
  Added new internal handler call 'ha_close()' to have one place to gather statistics.
  Gather statistics for internal temporary tables.
sql/handler.h:
  Added handler variables internal_tmp_table and rows_tmp_read.
  Split update_index_statistics() into two functions.
  Added ha_update_tmp_row() for faster tmp table handling with more statistics.
sql/item_sum.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/multi_range_read.cc:
  close() -> ha_close()
sql/mysqld.cc:
  New status variables: Rows_tmp_read, Handler_tmp_update and Handler_tmp_write
sql/opt_range.cc:
  close() -> ha_close()
sql/sql_base.cc:
  close() -> ha_close()
sql/sql_class.cc:
  Added handling of rows_tmp_read
sql/sql_class.h:
  Added new statistics variables.
  rows_read++ -> update_rows_read() to correctly count reads from internal temporary tables.
  Added handler::ha_update_tmp_row()
sql/sql_connect.cc:
  Added comment
sql/sql_expression_cache.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_select.cc:
  close() -> ha_close()
  ha_update_row() -> ha_update_tmp_row()
sql/sql_show.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_table.cc:
  Fixed a bug where thd->open_options was set to the wrong value when doing admin_recreate_table()
sql/sql_union.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_update.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/table.cc:
  close() -> ha_close()
storage/heap/ha_heap.cc:
  Removed double counting of statistics variables.
  close() -> ha_close() to get tmp table statistics.
storage/maria/ha_maria.cc:
  close() -> ha_close() to get tmp table statistics.
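For reference, a minimal usage sketch of the new counters, adapted from the test added below in mysql-test/t/status.test (exact values depend on the server build and table contents):

  flush status;
  create table t1 (a int not null auto_increment primary key, g int, b blob);
  insert into t1 (g,b) values (1,'a'), (2,'b'), (3,'b'), (1,'c');
  # group by on a non-indexed column is resolved through an internal temporary table
  select g, count(*) from t1 group by g;
  show status like 'Rows_tmp_read';    # rows read from internal temporary tables
  show status like 'Handler_tmp%';     # Handler_tmp_update and Handler_tmp_write
  show status like 'Rows_read';        # now counts only reads of real (non-tmp) data
  drop table t1;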
-rw-r--r--  mysql-test/r/status.result         62
-rw-r--r--  mysql-test/r/status_user.result     7
-rw-r--r--  mysql-test/t/status.test           17
-rw-r--r--  sql/ha_partition.cc                 8
-rw-r--r--  sql/handler.cc                     20
-rw-r--r--  sql/handler.h                      20
-rw-r--r--  sql/item_sum.cc                     2
-rw-r--r--  sql/multi_range_read.cc             2
-rw-r--r--  sql/mysqld.cc                       3
-rw-r--r--  sql/opt_range.cc                    4
-rw-r--r--  sql/sql_base.cc                     4
-rw-r--r--  sql/sql_class.cc                    2
-rw-r--r--  sql/sql_class.h                    25
-rw-r--r--  sql/sql_connect.cc                  1
-rw-r--r--  sql/sql_expression_cache.cc         2
-rw-r--r--  sql/sql_select.cc                  12
-rw-r--r--  sql/sql_show.cc                     2
-rw-r--r--  sql/sql_table.cc                    8
-rw-r--r--  sql/sql_union.cc                    2
-rw-r--r--  sql/sql_update.cc                   2
-rw-r--r--  sql/table.cc                        2
-rw-r--r--  storage/heap/ha_heap.cc            14
-rw-r--r--  storage/maria/ha_maria.cc           2
23 files changed, 166 insertions, 57 deletions
diff --git a/mysql-test/r/status.result b/mysql-test/r/status.result
index ce3acba9b8a..2c88345646c 100644
--- a/mysql-test/r/status.result
+++ b/mysql-test/r/status.result
@@ -156,25 +156,33 @@ Variable_name Value
Com_show_status 3
show status like 'hand%write%';
Variable_name Value
+Handler_tmp_write 0
Handler_write 0
show status like '%tmp%';
Variable_name Value
Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 5
show status like 'hand%write%';
Variable_name Value
+Handler_tmp_write 0
Handler_write 0
show status like '%tmp%';
Variable_name Value
Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 13
show status like 'com_show_status';
Variable_name Value
Com_show_status 8
rnd_diff tmp_table_diff
-20 8
+28 8
flush status;
show status like 'Com%function';
Variable_name Value
@@ -238,5 +246,57 @@ SELECT 9;
9
DROP PROCEDURE p1;
DROP FUNCTION f1;
+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+a g b
+1 1 a
+2 2 b
+3 3 b
+4 1 c
+select b, count(*) from t1 group by b;
+b count(*)
+a 1
+b 2
+c 1
+select g, count(*) from t1 group by g;
+g count(*)
+1 2
+2 1
+3 1
+show status like 'Row%';
+Variable_name Value
+Rows_read 12
+Rows_sent 10
+Rows_tmp_read 14
+show status like 'Handler%';
+Variable_name Value
+Handler_commit 0
+Handler_delete 0
+Handler_discover 0
+Handler_prepare 0
+Handler_read_first 0
+Handler_read_key 4
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_rnd 7
+Handler_read_rnd_next 23
+Handler_rollback 0
+Handler_savepoint 0
+Handler_savepoint_rollback 0
+Handler_tmp_update 2
+Handler_tmp_write 7
+Handler_update 0
+Handler_write 4
+show status like '%tmp%';
+Variable_name Value
+Created_tmp_disk_tables 1
+Created_tmp_files 0
+Created_tmp_tables 2
+Handler_tmp_update 2
+Handler_tmp_write 7
+Rows_tmp_read 34
+drop table t1;
set @@global.concurrent_insert= @old_concurrent_insert;
SET GLOBAL log_output = @old_log_output;
diff --git a/mysql-test/r/status_user.result b/mysql-test/r/status_user.result
index 636eeabfffc..0d1d028eb3d 100644
--- a/mysql-test/r/status_user.result
+++ b/mysql-test/r/status_user.result
@@ -94,6 +94,7 @@ show status like "rows%";
Variable_name Value
Rows_read 6
Rows_sent 1
+Rows_tmp_read 0
show status like "ha%";
Variable_name Value
Handler_commit 19
@@ -109,6 +110,8 @@ Handler_read_rnd_next 5
Handler_rollback 2
Handler_savepoint 0
Handler_savepoint_rollback 0
+Handler_tmp_update 0
+Handler_tmp_write 0
Handler_update 5
Handler_write 7
select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
@@ -133,7 +136,7 @@ CONCURRENT_CONNECTIONS 0
ROWS_READ 6
ROWS_SENT 2
ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
ROWS_UPDATED 5
SELECT_COMMANDS 3
UPDATE_COMMANDS 11
@@ -150,7 +153,7 @@ CONCURRENT_CONNECTIONS 0
ROWS_READ 6
ROWS_SENT 2
ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
ROWS_UPDATED 5
SELECT_COMMANDS 3
UPDATE_COMMANDS 11
diff --git a/mysql-test/t/status.test b/mysql-test/t/status.test
index 505df0fe8dc..46e454363cc 100644
--- a/mysql-test/t/status.test
+++ b/mysql-test/t/status.test
@@ -353,6 +353,23 @@ DROP FUNCTION f1;
# End of 5.1 tests
+#
+# Test of internal temporary table status variables
+#
+
+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+select b, count(*) from t1 group by b;
+select g, count(*) from t1 group by g;
+show status like 'Row%';
+show status like 'Handler%';
+show status like '%tmp%';
+drop table t1;
+
+# End of 5.3 tests
+
# Restore global concurrent_insert value. Keep in the end of the test file.
--connection default
set @@global.concurrent_insert= @old_concurrent_insert;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index bd34cb04290..eabb10a0d5a 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1301,7 +1301,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
DBUG_RETURN(0);
error_external_lock:
- VOID(file->close());
+ VOID(file->ha_close());
error_open:
VOID(file->ha_delete_table(part_name));
error_create:
@@ -1347,7 +1347,7 @@ void ha_partition::cleanup_new_partition(uint part_count)
while ((part_count > 0) && (*file))
{
(*file)->ha_external_lock(thd, F_UNLCK);
- (*file)->close();
+ (*file)->ha_close();
/* Leave the (*file)->ha_delete_table(part_name) to the ddl-log */
@@ -2842,7 +2842,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
err_handler:
DEBUG_SYNC(ha_thd(), "partition_open_error");
while (file-- != m_file)
- (*file)->close();
+ (*file)->ha_close();
err_alloc:
bitmap_free(&m_bulk_insert_started);
if (!m_is_clone_of)
@@ -2928,7 +2928,7 @@ int ha_partition::close(void)
repeat:
do
{
- (*file)->close();
+ (*file)->ha_close();
} while (*(++file));
if (first && m_added_file && m_added_file[0])
diff --git a/sql/handler.cc b/sql/handler.cc
index 318e2e116e2..751e31fdf9f 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2168,7 +2168,7 @@ THD *handler::ha_thd(void) const
Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
*/
int handler::ha_open(TABLE *table_arg, const char *name, int mode,
- int test_if_locked)
+ uint test_if_locked)
{
int error;
DBUG_ENTER("handler::ha_open");
@@ -2212,11 +2212,22 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
dup_ref=ref+ALIGN_SIZE(ref_length);
cached_table_flags= table_flags();
}
- rows_read= rows_changed= 0;
- memset(index_rows_read, 0, sizeof(index_rows_read));
+ reset_statistics();
+ internal_tmp_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
DBUG_RETURN(error);
}
+int handler::ha_close()
+{
+ DBUG_ENTER("ha_close");
+ /*
+ Increment global statistics for temporary tables.
+ In_use is 0 for tables that was closed from the table cache.
+ */
+ if (table->in_use)
+ status_var_add(table->in_use->status_var.rows_tmp_read, rows_tmp_read);
+ DBUG_RETURN(close());
+}
/* Initialize handler for random reading, with error handling */
@@ -3238,7 +3249,7 @@ int handler::rename_table(const char * from, const char * to)
void handler::drop_table(const char *name)
{
- close();
+ ha_close();
delete_table(name);
}
@@ -3757,6 +3768,7 @@ void handler::update_global_table_stats()
TABLE_STATS * table_stats;
status_var_add(table->in_use->status_var.rows_read, rows_read);
+ DBUG_ASSERT(rows_tmp_read == 0);
if (!table->in_use->userstat_running)
{
diff --git a/sql/handler.h b/sql/handler.h
index 4707aabbd52..63e7c567b29 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1,4 +1,5 @@
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+ Copyright 2009-2011 Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1599,6 +1600,7 @@ public:
KEY_PART_INFO *range_key_part;
int key_compare_result_on_equal;
bool eq_range;
+ bool internal_tmp_table; /* If internal tmp table */
/*
TRUE <=> the engine guarantees that returned records are within the range
@@ -1643,6 +1645,7 @@ public:
*/
/* Statistics variables */
ulonglong rows_read;
+ ulonglong rows_tmp_read;
ulonglong rows_changed;
/* One bigger than needed to avoid to test if key == MAX_KEY */
ulonglong index_rows_read[MAX_KEY+1];
@@ -1685,7 +1688,7 @@ public:
}
/* ha_ methods: pubilc wrappers for private virtual API */
- int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
+ int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked);
int ha_index_init(uint idx, bool sorted)
{
int result;
@@ -1809,7 +1812,7 @@ public:
uint get_dup_key(int error);
void reset_statistics()
{
- rows_read= rows_changed= 0;
+ rows_read= rows_changed= rows_tmp_read= 0;
bzero(index_rows_read, sizeof(index_rows_read));
}
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
@@ -1894,7 +1897,7 @@ public:
*/
uint get_index(void) const
{ return inited == INDEX ? active_index : MAX_KEY; }
- virtual int close(void)=0;
+ int ha_close(void);
/**
@retval 0 Bulk update used by handler
@@ -1970,10 +1973,18 @@ protected:
virtual int index_last(uchar * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
+ virtual int close(void)=0;
+ inline void update_rows_read()
+ {
+ if (likely(!internal_tmp_table))
+ rows_read++;
+ else
+ rows_tmp_read++;
+ }
inline void update_index_statistics()
{
index_rows_read[active_index]++;
- rows_read++;
+ update_rows_read();
}
public:
@@ -2604,6 +2615,7 @@ public:
virtual handlerton *partition_ht() const
{ return ht; }
inline int ha_write_tmp_row(uchar *buf);
+ inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data);
};
#include "multi_range_read.h"
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index e7fe2095481..bc4ab876b55 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -2587,7 +2587,7 @@ bool Item_sum_count_distinct::add()
*/
return tree->unique_add(table->record[0] + table->s->null_bytes);
}
- if ((error= table->file->ha_write_row(table->record[0])) &&
+ if ((error= table->file->ha_write_tmp_row(table->record[0])) &&
table->file->is_fatal_error(error, HA_CHECK_DUP))
return TRUE;
return FALSE;
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index c666c571787..f0a1987c383 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -1075,7 +1075,7 @@ void DsMrr_impl::close_second_handler()
{
secondary_file->ha_index_or_rnd_end();
secondary_file->ha_external_lock(current_thd, F_UNLCK);
- secondary_file->close();
+ secondary_file->ha_close();
delete secondary_file;
secondary_file= NULL;
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 5dcbaa0b13b..6cbd73f4a08 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -8266,6 +8266,8 @@ SHOW_VAR status_vars[]= {
{"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
{"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
{"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
+ {"Handler_tmp_update", (char*) offsetof(STATUS_VAR, ha_tmp_update_count), SHOW_LONG_STATUS},
+ {"Handler_tmp_write", (char*) offsetof(STATUS_VAR, ha_tmp_write_count), SHOW_LONG_STATUS},
{"Key", (char*) &show_default_keycache, SHOW_FUNC},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
@@ -8280,6 +8282,7 @@ SHOW_VAR status_vars[]= {
{"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
{"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
{"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
+ {"Rows_tmp_read", (char*) offsetof(STATUS_VAR, rows_tmp_read), SHOW_LONGLONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
{"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index dc987e196f4..3b672d6b704 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1804,7 +1804,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
free_file));
file->ha_external_lock(current_thd, F_UNLCK);
- file->close();
+ file->ha_close();
delete file;
}
}
@@ -1999,7 +1999,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
if (init() || reset())
{
file->ha_external_lock(thd, F_UNLCK);
- file->close();
+ file->ha_close();
goto failure;
}
free_file= TRUE;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 57775146b90..fee407f9e0e 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -675,7 +675,7 @@ void close_handle_and_leave_table_as_lock(TABLE *table)
*/
if (table->child_l || table->parent)
detach_merge_children(table, FALSE);
- table->file->close();
+ table->file->ha_close();
table->db_stat= 0; // Mark file closed
release_table_share(table->s, RELEASE_NORMAL);
table->s= share;
@@ -3708,7 +3708,7 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
if (table->db_stat)
{
table->db_stat= 0;
- table->file->close();
+ table->file->ha_close();
}
}
else
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index a7a3e15965e..3bd1e591054 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1201,6 +1201,7 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
to_var->bytes_sent+= from_var->bytes_sent;
to_var->rows_read+= from_var->rows_read;
to_var->rows_sent+= from_var->rows_sent;
+ to_var->rows_tmp_read+= from_var->rows_tmp_read;
to_var->binlog_bytes_written+= from_var->binlog_bytes_written;
to_var->cpu_time+= from_var->cpu_time;
to_var->busy_time+= from_var->busy_time;
@@ -1236,6 +1237,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
to_var->rows_read+= from_var->rows_read - dec_var->rows_read;
to_var->rows_sent+= from_var->rows_sent - dec_var->rows_sent;
+ to_var->rows_tmp_read+= from_var->rows_tmp_read - dec_var->rows_tmp_read;
to_var->binlog_bytes_written+= from_var->binlog_bytes_written -
dec_var->binlog_bytes_written;
to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index b02f0daa6fe..6f2852bf431 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -530,6 +530,9 @@ typedef struct system_status_var
ulong ha_rollback_count;
ulong ha_update_count;
ulong ha_write_count;
+ /* The following are for internal temporary tables */
+ ulong ha_tmp_update_count;
+ ulong ha_tmp_write_count;
ulong ha_prepare_count;
ulong ha_discover_count;
ulong ha_savepoint_count;
@@ -582,6 +585,7 @@ typedef struct system_status_var
ulonglong bytes_sent;
ulonglong rows_read;
ulonglong rows_sent;
+ ulonglong rows_tmp_read;
ulonglong binlog_bytes_written;
double last_query_cost;
double cpu_time, busy_time;
@@ -3610,7 +3614,7 @@ inline int handler::ha_index_read_idx_map(uchar * buf, uint index,
int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
if (!error)
{
- rows_read++;
+ update_rows_read();
index_rows_read[index]++;
}
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -3677,7 +3681,8 @@ inline int handler::ha_ft_read(uchar *buf)
{
int error= ft_read(buf);
if (!error)
- rows_read++;
+ update_rows_read();
+
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
@@ -3687,7 +3692,7 @@ inline int handler::ha_rnd_next(uchar *buf)
increment_statistics(&SSV::ha_read_rnd_next_count);
int error= rnd_next(buf);
if (!error)
- rows_read++;
+ update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
@@ -3697,7 +3702,7 @@ inline int handler::ha_rnd_pos(uchar *buf, uchar *pos)
increment_statistics(&SSV::ha_read_rnd_count);
int error= rnd_pos(buf, pos);
if (!error)
- rows_read++;
+ update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
@@ -3706,7 +3711,7 @@ inline int handler::ha_rnd_pos_by_record(uchar *buf)
{
int error= rnd_pos_by_record(buf);
if (!error)
- rows_read++;
+ update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
@@ -3715,15 +3720,21 @@ inline int handler::ha_read_first_row(uchar *buf, uint primary_key)
{
int error= read_first_row(buf, primary_key);
if (!error)
- rows_read++;
+ update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
inline int handler::ha_write_tmp_row(uchar *buf)
{
- increment_statistics(&SSV::ha_write_count);
+ increment_statistics(&SSV::ha_tmp_write_count);
return write_row(buf);
}
+inline int handler::ha_update_tmp_row(const uchar *old_data, uchar *new_data)
+{
+ increment_statistics(&SSV::ha_tmp_update_count);
+ return update_row(old_data, new_data);
+}
+
#endif /* MYSQL_SERVER */
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 05e33826da9..debae2dd74d 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -690,6 +690,7 @@ static void update_global_user_stats_with_user(THD *thd,
user_stats->binlog_bytes_written+=
(thd->status_var.binlog_bytes_written -
thd->org_status_var.binlog_bytes_written);
+ /* We are not counting rows in internal temporary tables here ! */
user_stats->rows_read+= (thd->status_var.rows_read -
thd->org_status_var.rows_read);
user_stats->rows_sent+= (thd->status_var.rows_sent -
diff --git a/sql/sql_expression_cache.cc b/sql/sql_expression_cache.cc
index c3450884610..d79dca03e42 100644
--- a/sql/sql_expression_cache.cc
+++ b/sql/sql_expression_cache.cc
@@ -249,7 +249,7 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
if (table_thd->is_error())
goto err;;
- if ((error= cache_table->file->ha_write_row(cache_table->record[0])))
+ if ((error= cache_table->file->ha_write_tmp_row(cache_table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
if (cache_table->file->is_fatal_error(error, HA_CHECK_DUP) &&
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c3e1c785b59..5d2e838eda6 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -14322,7 +14322,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
/* remove heap table and change to use myisam table */
(void) table->file->ha_rnd_end();
- (void) table->file->close(); // This deletes the table !
+ (void) table->file->ha_close(); // This deletes the table !
delete table->file;
table->file=0;
plugin_unlock(0, table->s->db_plugin);
@@ -14343,7 +14343,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
table->file->print_error(write_err, MYF(0));
err_killed:
(void) table->file->ha_rnd_end();
- (void) new_table.file->close();
+ (void) new_table.file->ha_close();
err1:
new_table.file->ha_delete_table(new_table.s->table_name.str);
err2:
@@ -16183,8 +16183,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error= table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error= table->file->ha_update_tmp_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -16267,8 +16267,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error= table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error= table->file->ha_update_tmp_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 0cf7a9b1dc2..e2949c49608 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2814,7 +2814,7 @@ typedef struct st_lookup_field_values
bool schema_table_store_record(THD *thd, TABLE *table)
{
int error;
- if ((error= table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param;
if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 685b468a868..c6730bc0853 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -4681,6 +4681,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
int result_code;
bool need_repair_or_alter= 0;
DBUG_ENTER("mysql_admin_table");
+ DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options));
if (end_active_trans(thd))
DBUG_RETURN(1);
@@ -4705,9 +4706,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
bool fatal_error=0;
DBUG_PRINT("admin", ("table: '%s'.'%s'", table->db, table->table_name));
- DBUG_PRINT("admin", ("extra_open_options: %u", extra_open_options));
strxmov(table_name, db, ".", table->table_name, NullS);
- thd->open_options|= extra_open_options;
table->lock_type= lock_type;
/* open only one table from local list of command */
{
@@ -4734,12 +4733,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
lex->sql_command == SQLCOM_ANALYZE ||
lex->sql_command == SQLCOM_OPTIMIZE)
thd->prepare_derived_at_open= TRUE;
+ thd->open_options|= extra_open_options;
open_and_lock_tables(thd, table);
+ thd->open_options&= ~extra_open_options;
thd->prepare_derived_at_open= FALSE;
thd->no_warnings_for_error= 0;
table->next_global= save_next_global;
table->next_local= save_next_local;
- thd->open_options&= ~extra_open_options;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (table->table)
{
@@ -4923,7 +4923,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
/* We use extra_open_options to be able to open crashed tables */
thd->open_options|= extra_open_options;
result_code= admin_recreate_table(thd, table);
- thd->open_options= ~extra_open_options;
+ thd->open_options&= ~extra_open_options;
goto send_result;
}
if (check_old_types || check_for_upgrade)
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 50505dd94a9..d0ec95efc27 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -60,7 +60,7 @@ int select_union::send_data(List<Item> &values)
if (thd->is_error())
return 1;
- if ((write_err= table->file->ha_write_row(table->record[0])))
+ if ((write_err= table->file->ha_write_tmp_row(table->record[0])))
{
if (write_err == HA_ERR_FOUND_DUPP_KEY)
{
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 4821fc2bd8f..bd088857ff7 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1868,7 +1868,7 @@ int multi_update::send_data(List<Item> &not_used_values)
*values_for_table[offset], TRUE, FALSE);
/* Write row, ignoring duplicated updates to a row */
- error= tmp_table->file->ha_write_row(tmp_table->record[0]);
+ error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
{
if (error &&
diff --git a/sql/table.cc b/sql/table.cc
index 2d2311d8987..bf8e0e46661 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2460,7 +2460,7 @@ int closefrm(register TABLE *table, bool free_share)
{
if (table->s->deleting)
table->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
- error=table->file->close();
+ error=table->file->ha_close();
}
table->alias.free();
if (table->expr_arena)
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 0297451871f..ae45e85c1b3 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -226,7 +226,6 @@ void ha_heap::update_key_stats()
int ha_heap::write_row(uchar * buf)
{
int res;
- ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
@@ -250,7 +249,6 @@ int ha_heap::write_row(uchar * buf)
int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
int res;
- ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
res= heap_update(file,old_data,new_data);
@@ -269,7 +267,6 @@ int ha_heap::update_row(const uchar * old_data, uchar * new_data)
int ha_heap::delete_row(const uchar * buf)
{
int res;
- ha_statistic_increment(&SSV::ha_delete_count);
res= heap_delete(file,buf);
if (!res && table->s->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
@@ -288,7 +285,6 @@ int ha_heap::index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
@@ -298,7 +294,6 @@ int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
key_part_map keypart_map)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_key_count);
int error= heap_rkey(file, buf, active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status= error ? STATUS_NOT_FOUND : 0;
@@ -309,7 +304,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
- ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file, buf, index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
@@ -318,7 +312,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
int ha_heap::index_next(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_next_count);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -327,7 +320,6 @@ int ha_heap::index_next(uchar * buf)
int ha_heap::index_prev(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_prev_count);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -336,7 +328,6 @@ int ha_heap::index_prev(uchar * buf)
int ha_heap::index_first(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_first_count);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -345,7 +336,6 @@ int ha_heap::index_first(uchar * buf)
int ha_heap::index_last(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- ha_statistic_increment(&SSV::ha_read_last_count);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -358,7 +348,6 @@ int ha_heap::rnd_init(bool scan)
int ha_heap::rnd_next(uchar *buf)
{
- ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=heap_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -368,7 +357,6 @@ int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
int error;
HEAP_PTR heap_position;
- ha_statistic_increment(&SSV::ha_read_rnd_count);
memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR));
error=heap_rrnd(file, buf, heap_position);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -582,7 +570,7 @@ int ha_heap::delete_table(const char *name)
void ha_heap::drop_table(const char *name)
{
file->s->delete_on_close= 1;
- close();
+ ha_close();
}
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 4963e6e7a04..57045ff2cd3 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -2524,7 +2524,7 @@ int ha_maria::delete_table(const char *name)
void ha_maria::drop_table(const char *name)
{
DBUG_ASSERT(file->s->temporary);
- (void) close();
+ (void) ha_close();
(void) maria_delete_table_files(name, 0);
}