-rw-r--r--  .bzrignore | 1
-rw-r--r--  include/base64.h | 2
-rw-r--r--  include/heap.h | 1
-rw-r--r--  include/my_base.h | 11
-rw-r--r--  include/my_bitmap.h | 53
-rw-r--r--  include/myisam.h | 1
-rw-r--r--  include/myisammrg.h | 1
-rw-r--r--  include/mysql_com.h | 1
-rw-r--r--  mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test | 67
-rw-r--r--  mysql-test/install_test_db.sh | 7
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 1
-rw-r--r--  mysql-test/mysql-test-run.sh | 10
-rw-r--r--  mysql-test/r/binlog_row_mix_innodb_myisam.result | 158
-rw-r--r--  mysql-test/r/binlog_stm_mix_innodb_myisam.result | 103
-rw-r--r--  mysql-test/r/create.result | 46
-rw-r--r--  mysql-test/r/federated.result | 1
-rw-r--r--  mysql-test/r/func_gconcat.result | 6
-rw-r--r--  mysql-test/r/func_time.result | 1
-rw-r--r--  mysql-test/r/innodb_mysql.result | 60
-rw-r--r--  mysql-test/r/insert.result | 26
-rw-r--r--  mysql-test/r/loaddata.result | 9
-rw-r--r--  mysql-test/r/multi_update.result | 80
-rw-r--r--  mysql-test/r/ndb_index_unique.result | 2
-rw-r--r--  mysql-test/r/ndb_replace.result | 2
-rw-r--r--  mysql-test/r/rpl_ddl.result | 4
-rw-r--r--  mysql-test/r/view_grant.result | 2
-rw-r--r--  mysql-test/t/create.test | 37
-rw-r--r--  mysql-test/t/federated.test | 57
-rw-r--r--  mysql-test/t/func_gconcat.test | 4
-rw-r--r--  mysql-test/t/func_time.test | 1
-rw-r--r--  mysql-test/t/innodb_mysql.test | 51
-rw-r--r--  mysql-test/t/insert.test | 29
-rw-r--r--  mysql-test/t/loaddata.test | 4
-rw-r--r--  mysql-test/t/multi_update.test | 30
-rw-r--r--  mysql-test/t/view_grant.test | 2
-rw-r--r--  mysql-test/valgrind.supp | 14
-rw-r--r--  mysys/base64.c | 3
-rw-r--r--  mysys/my_bitmap.c | 210
-rw-r--r--  mysys/thr_lock.c | 14
-rw-r--r--  sql/event.cc | 33
-rw-r--r--  sql/field.cc | 203
-rw-r--r--  sql/field.h | 11
-rw-r--r--  sql/filesort.cc | 106
-rw-r--r--  sql/ha_berkeley.cc | 59
-rw-r--r--  sql/ha_berkeley.h | 3
-rw-r--r--  sql/ha_federated.cc | 275
-rw-r--r--  sql/ha_federated.h | 95
-rw-r--r--  sql/ha_heap.cc | 40
-rw-r--r--  sql/ha_heap.h | 10
-rw-r--r--  sql/ha_innodb.cc | 81
-rw-r--r--  sql/ha_innodb.h | 19
-rw-r--r--  sql/ha_myisam.cc | 56
-rw-r--r--  sql/ha_myisam.h | 3
-rw-r--r--  sql/ha_myisammrg.cc | 35
-rw-r--r--  sql/ha_myisammrg.h | 7
-rw-r--r--  sql/ha_ndbcluster.cc | 209
-rw-r--r--  sql/ha_ndbcluster.h | 13
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 71
-rw-r--r--  sql/ha_partition.cc | 191
-rw-r--r--  sql/ha_partition.h | 32
-rw-r--r--  sql/handler.cc | 316
-rw-r--r--  sql/handler.h | 309
-rw-r--r--  sql/item.cc | 77
-rw-r--r--  sql/item.h | 24
-rw-r--r--  sql/item_cmpfunc.cc | 38
-rw-r--r--  sql/item_cmpfunc.h | 4
-rw-r--r--  sql/item_func.cc | 8
-rw-r--r--  sql/item_func.h | 2
-rw-r--r--  sql/item_row.cc | 6
-rw-r--r--  sql/item_row.h | 2
-rw-r--r--  sql/item_strfunc.h | 6
-rw-r--r--  sql/item_subselect.cc | 42
-rw-r--r--  sql/item_subselect.h | 1
-rw-r--r--  sql/item_sum.cc | 9
-rw-r--r--  sql/item_sum.h | 2
-rw-r--r--  sql/key.cc | 19
-rw-r--r--  sql/log.cc | 21
-rw-r--r--  sql/log_event.cc | 82
-rw-r--r--  sql/mysql_priv.h | 20
-rw-r--r--  sql/mysqld.cc | 11
-rw-r--r--  sql/opt_range.cc | 353
-rw-r--r--  sql/opt_range.h | 9
-rw-r--r--  sql/opt_sum.cc | 61
-rw-r--r--  sql/protocol.cc | 11
-rw-r--r--  sql/records.cc | 11
-rw-r--r--  sql/set_var.cc | 5
-rw-r--r--  sql/share/errmsg.txt | 26
-rw-r--r--  sql/sp.cc | 6
-rw-r--r--  sql/sp_head.cc | 2
-rw-r--r--  sql/spatial.cc | 2
-rw-r--r--  sql/sql_acl.cc | 72
-rw-r--r--  sql/sql_base.cc | 172
-rw-r--r--  sql/sql_bitmap.h | 2
-rw-r--r--  sql/sql_class.cc | 54
-rw-r--r--  sql/sql_class.h | 41
-rw-r--r--  sql/sql_delete.cc | 18
-rw-r--r--  sql/sql_do.cc | 2
-rw-r--r--  sql/sql_handler.cc | 13
-rw-r--r--  sql/sql_help.cc | 28
-rw-r--r--  sql/sql_insert.cc | 279
-rw-r--r--  sql/sql_load.cc | 48
-rw-r--r--  sql/sql_olap.cc | 8
-rw-r--r--  sql/sql_parse.cc | 47
-rw-r--r--  sql/sql_partition.cc | 27
-rw-r--r--  sql/sql_plugin.cc | 3
-rw-r--r--  sql/sql_prepare.cc | 11
-rw-r--r--  sql/sql_repl.cc | 17
-rw-r--r--  sql/sql_select.cc | 256
-rw-r--r--  sql/sql_select.h | 23
-rw-r--r--  sql/sql_show.cc | 70
-rw-r--r--  sql/sql_table.cc | 101
-rw-r--r--  sql/sql_trigger.h | 6
-rw-r--r--  sql/sql_udf.cc | 7
-rw-r--r--  sql/sql_union.cc | 10
-rw-r--r--  sql/sql_update.cc | 131
-rw-r--r--  sql/sql_view.cc | 17
-rw-r--r--  sql/table.cc | 368
-rw-r--r--  sql/table.h | 77
-rw-r--r--  sql/tztime.cc | 11
-rw-r--r--  storage/archive/ha_archive.cc | 54
-rw-r--r--  storage/archive/ha_archive.h | 4
-rw-r--r--  storage/blackhole/ha_blackhole.cc | 20
-rw-r--r--  storage/blackhole/ha_blackhole.h | 8
-rw-r--r--  storage/csv/ha_tina.cc | 39
-rw-r--r--  storage/csv/ha_tina.h | 12
-rw-r--r--  storage/example/ha_example.cc | 7
-rw-r--r--  storage/example/ha_example.h | 4
-rw-r--r--  storage/heap/hp_extra.c | 17
-rw-r--r--  storage/heap/hp_test2.c | 2
-rw-r--r--  storage/myisam/ft_boolean_search.c | 5
-rw-r--r--  storage/myisam/mi_extra.c | 56
-rw-r--r--  storage/myisam/mi_search.c | 38
-rw-r--r--  storage/myisam/mi_test2.c | 4
-rw-r--r--  storage/myisam/myisampack.c | 4
-rw-r--r--  storage/myisammrg/myrg_extra.c | 26
-rw-r--r--  unittest/mysys/base64.t.c | 2
136 files changed, 4049 insertions(+), 2321 deletions(-)
diff --git a/.bzrignore b/.bzrignore
index 095c04b36ee..f176efe0e0a 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1770,3 +1770,4 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
+mysys/test_bitmap
diff --git a/include/base64.h b/include/base64.h
index a2b0fc0352b..4653e824a9a 100644
--- a/include/base64.h
+++ b/include/base64.h
@@ -21,8 +21,6 @@
extern "C" {
#endif
-#include <my_global.h>
-
/*
Calculate how much memory needed for dst of base64_encode()
*/
diff --git a/include/heap.h b/include/heap.h
index 855cff117e2..bf956184d3c 100644
--- a/include/heap.h
+++ b/include/heap.h
@@ -209,6 +209,7 @@ extern int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
extern int heap_delete_table(const char *name);
extern void heap_drop_table(HP_INFO *info);
extern int heap_extra(HP_INFO *info,enum ha_extra_function function);
+extern int heap_reset(HP_INFO *info);
extern int heap_rename(const char *old_name,const char *new_name);
extern int heap_panic(enum ha_panic_function flag);
extern int heap_rsame(HP_INFO *info,byte *record,int inx);
diff --git a/include/my_base.h b/include/my_base.h
index e014f7c33b7..d663fac153b 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -105,7 +105,7 @@ enum ha_key_alg {
enum ha_extra_function {
HA_EXTRA_NORMAL=0, /* Optimize for space (def) */
HA_EXTRA_QUICK=1, /* Optimize for speed */
- HA_EXTRA_RESET=2, /* Reset database to after open */
+ HA_EXTRA_NOT_USED=2,
HA_EXTRA_CACHE=3, /* Cache record in HA_rrnd() */
HA_EXTRA_NO_CACHE=4, /* End caching of records (def) */
HA_EXTRA_NO_READCHECK=5, /* No readcheck on update */
@@ -131,15 +131,6 @@ enum ha_extra_function {
HA_EXTRA_RESET_STATE, /* Reset positions */
HA_EXTRA_IGNORE_DUP_KEY, /* Dup keys don't rollback everything*/
HA_EXTRA_NO_IGNORE_DUP_KEY,
- /*
- Instructs InnoDB to retrieve all columns (except in key read), not just
- those where field->query_id is the same as the current query id
- */
- HA_EXTRA_RETRIEVE_ALL_COLS,
- /*
- Instructs InnoDB to retrieve at least all the primary key columns
- */
- HA_EXTRA_RETRIEVE_PRIMARY_KEY,
HA_EXTRA_PREPARE_FOR_DELETE,
HA_EXTRA_PREPARE_FOR_UPDATE, /* Remove read cache if problems */
HA_EXTRA_PRELOAD_BUFFER_SIZE, /* Set buffer size for preloading */
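This hunk retires HA_EXTRA_RESET (its slot is kept as HA_EXTRA_NOT_USED so the numeric values of later enum members stay stable) and drops the two HA_EXTRA_RETRIEVE_* hints, presumably superseded by the per-table column bitmaps this changeset reworks (my_bitmap.h, table.cc). A reset now goes through a dedicated per-engine entry point; a minimal sketch, assuming only the heap_reset() prototype added to include/heap.h below (the wrapper name is hypothetical, not patch content):

  #include <my_global.h>
  #include <heap.h>

  /* Old style: heap_extra(info, HA_EXTRA_RESET);
     New style: an explicit reset call per engine. */
  static int example_reset(HP_INFO *info)
  {
    return heap_reset(info);
  }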
diff --git a/include/my_bitmap.h b/include/my_bitmap.h
index 428ca7dc702..0b16a1b4832 100644
--- a/include/my_bitmap.h
+++ b/include/my_bitmap.h
@@ -17,19 +17,18 @@
#ifndef _my_bitmap_h_
#define _my_bitmap_h_
-#ifdef THREAD
-#include <my_pthread.h>
-#endif
-
#define MY_BIT_NONE (~(uint) 0)
+#include <m_string.h>
+
+typedef uint32 my_bitmap_map;
typedef struct st_bitmap
{
- uint32 *bitmap;
+ my_bitmap_map *bitmap;
uint n_bits; /* number of bits occupied by the above */
- uint32 last_word_mask;
- uint32 *last_word_ptr;
+ my_bitmap_map last_word_mask;
+ my_bitmap_map *last_word_ptr;
/*
mutex will be acquired for the duration of each bitmap operation if
thread_safe flag in bitmap_init was set. Otherwise, we optimize by not
@@ -43,12 +42,16 @@ typedef struct st_bitmap
#ifdef __cplusplus
extern "C" {
#endif
-extern my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits, my_bool thread_safe);
+extern my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits,
+ my_bool thread_safe);
extern my_bool bitmap_is_clear_all(const MY_BITMAP *map);
extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
extern my_bool bitmap_is_set_all(const MY_BITMAP *map);
extern my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2);
+extern my_bool bitmap_is_overlapping(const MY_BITMAP *map1,
+ const MY_BITMAP *map2);
extern my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit);
+extern my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit);
extern my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit);
extern uint bitmap_set_next(MY_BITMAP *map);
extern uint bitmap_get_first(const MY_BITMAP *map);
@@ -62,6 +65,7 @@ extern void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_invert(MY_BITMAP *map);
+extern void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2);
extern uint bitmap_lock_set_next(MY_BITMAP *map);
extern void bitmap_lock_clear_bit(MY_BITMAP *map, uint bitmap_bit);
@@ -88,7 +92,7 @@ extern void bitmap_lock_xor(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_lock_invert(MY_BITMAP *map);
#endif
/* Fast, not thread safe, bitmap functions */
-#define bitmap_buffer_size(bits) 4*(((bits)+31)/32);
+#define bitmap_buffer_size(bits) (((bits)+31)/32)*4
#define no_bytes_in_map(map) (((map)->n_bits + 7)/8)
#define no_words_in_map(map) (((map)->n_bits + 31)/32)
#define bytes_word_aligned(bytes) (4*((bytes + 3)/4))
@@ -98,28 +102,28 @@ extern void bitmap_lock_invert(MY_BITMAP *map);
^= (1 << ((BIT) & 7)))
#define _bitmap_clear_bit(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
&= ~ (1 << ((BIT) & 7)))
-#define _bitmap_is_set(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
- & (1 << ((BIT) & 7)))
+#define _bitmap_is_set(MAP, BIT) (uint) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
+ & (1 << ((BIT) & 7)))
#ifndef DBUG_OFF
-static inline uint32
+static inline void
bitmap_set_bit(MY_BITMAP *map,uint bit)
{
DBUG_ASSERT(bit < (map)->n_bits);
- return _bitmap_set_bit(map,bit);
+ _bitmap_set_bit(map,bit);
}
-static inline uint32
+static inline void
bitmap_flip_bit(MY_BITMAP *map,uint bit)
{
DBUG_ASSERT(bit < (map)->n_bits);
- return _bitmap_flip_bit(map,bit);
+ _bitmap_flip_bit(map,bit);
}
-static inline uint32
+static inline void
bitmap_clear_bit(MY_BITMAP *map,uint bit)
{
DBUG_ASSERT(bit < (map)->n_bits);
- return _bitmap_clear_bit(map,bit);
+ _bitmap_clear_bit(map,bit);
}
-static inline uint32
+static inline uint
bitmap_is_set(const MY_BITMAP *map,uint bit)
{
DBUG_ASSERT(bit < (map)->n_bits);
@@ -131,11 +135,16 @@ bitmap_is_set(const MY_BITMAP *map,uint bit)
#define bitmap_clear_bit(MAP, BIT) _bitmap_clear_bit(MAP, BIT)
#define bitmap_is_set(MAP, BIT) _bitmap_is_set(MAP, BIT)
#endif
-#define bitmap_cmp(MAP1, MAP2) \
- (memcmp((MAP1)->bitmap, (MAP2)->bitmap, 4*no_words_in_map((MAP1)))==0)
+
+static inline my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+ *(map1)->last_word_ptr|= (map1)->last_word_mask;
+ *(map2)->last_word_ptr|= (map2)->last_word_mask;
+ return memcmp((map1)->bitmap, (map2)->bitmap, 4*no_words_in_map((map1)))==0;
+}
+
#define bitmap_clear_all(MAP) \
- { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); \
- *(MAP)->last_word_ptr|= (MAP)->last_word_mask; }
+ { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); }
#define bitmap_set_all(MAP) \
(memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP))))
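Summarizing the header changes: bitmap words get the my_bitmap_map typedef, the set/flip/clear inlines now return void, bitmap_buffer_size() loses the stray trailing semicolon that made the old macro unusable inside an expression, and bitmap_cmp() becomes an inline that first sets the unused bits of each map's last word so the memcmp() is well defined. A minimal usage sketch (an annotation, not patch content) exercising only the declarations above:

  #include <my_global.h>
  #include <my_bitmap.h>

  void bitmap_api_demo(void)
  {
    MY_BITMAP a, b;
    /* NULL buffer: bitmap_init() allocates bitmap_buffer_size(64) bytes */
    bitmap_init(&a, NULL, 64, FALSE);
    bitmap_init(&b, NULL, 64, FALSE);

    bitmap_set_bit(&a, 10);                  /* returns void now */
    bitmap_copy(&b, &a);                     /* new in this changeset */

    if (bitmap_is_overlapping(&a, &b))       /* TRUE: bit 10 set in both */
      (void) bitmap_test_and_clear(&a, 10);  /* returns old bit, clears it */

    bitmap_free(&a);
    bitmap_free(&b);
  }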
diff --git a/include/myisam.h b/include/myisam.h
index db1a7bd984d..075779a6e42 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -303,6 +303,7 @@ extern int mi_rename(const char *from, const char *to);
extern int mi_extra(struct st_myisam_info *file,
enum ha_extra_function function,
void *extra_arg);
+extern int mi_reset(struct st_myisam_info *file);
extern ha_rows mi_records_in_range(struct st_myisam_info *info,int inx,
key_range *min_key, key_range *max_key);
extern int mi_log(int activate_log);
diff --git a/include/myisammrg.h b/include/myisammrg.h
index de8a36c2d0a..f23759e22e1 100644
--- a/include/myisammrg.h
+++ b/include/myisammrg.h
@@ -99,6 +99,7 @@ extern int myrg_create(const char *name, const char **table_names,
uint insert_method, my_bool fix_names);
extern int myrg_extra(MYRG_INFO *file,enum ha_extra_function function,
void *extra_arg);
+extern int myrg_reset(MYRG_INFO *info);
extern void myrg_extrafunc(MYRG_INFO *info,invalidator_by_filename inv);
extern ha_rows myrg_records_in_range(MYRG_INFO *info,int inx,
key_range *min_key, key_range *max_key);
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 2af0fb86906..5a64e5ad68d 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -98,6 +98,7 @@ enum enum_server_command
#define BINCMP_FLAG 131072 /* Intern: Used by sql_yacc */
#define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
#define FIELD_IN_PART_FUNC_FLAG (1 << 19)/* Field part of partition func */
+#define FIELD_IN_ADD_INDEX (1<< 20) /* Intern: Field used in ADD INDEX */
#define REFRESH_GRANT 1 /* Refresh grant tables */
#define REFRESH_LOG 2 /* Start on new log file */
diff --git a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
index b75a326d5a8..47e1ffb23c7 100644
--- a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
+++ b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
@@ -234,8 +234,8 @@ select (@after-@before) >= 2;
drop table t1,t2;
commit;
-# test for BUG#7947 - DO RELEASE_LOCK() not written to binlog on rollback in the middle
-# of a transaction
+# test for BUG#7947 - DO RELEASE_LOCK() not written to binlog on rollback in
+# the middle of a transaction
connection con2;
begin;
@@ -265,6 +265,68 @@ drop table t0,t2;
# End of 4.1 tests
+#
+# Test behaviour of CREATE ... SELECT when mixing MyISAM and InnoDB tables
+#
+
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+# This should give warning
+DROP TABLE if exists t2;
+INSERT INTO t1 values (3,3);
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+# This should give warning
+DROP TABLE IF EXISTS t2;
+
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * FROM t2;
+DROP TABLE t2;
+
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+INSERT INTO t1 values (8,8);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+COMMIT;
+INSERT INTO t1 values (9,9);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t1;
+INSERT INTO t2 values (100,100);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+COMMIT;
+INSERT INTO t2 values (101,101);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+SELECT * from t2;
+DROP TABLE t1,t2;
+--replace_regex /table_id: [0-9]+/table_id: #/
+show binlog events from 102;
+
# Test for BUG#16559 (ROLLBACK should always have a zero error code in
# binlog). Has to be here and not earlier, as the SELECTs influence
# XIDs differently between normal and ps-protocol (and SHOW BINLOG
@@ -283,3 +345,4 @@ disconnect con3;
connection con4;
select get_lock("a",10); # wait for rollback to finish
+
diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh
index 4554b92857e..9006957019a 100644
--- a/mysql-test/install_test_db.sh
+++ b/mysql-test/install_test_db.sh
@@ -34,7 +34,6 @@ if [ x$1 = x"-slave" ]
then
shift 1
data=var/slave-data
- ldata=$fix_bin/var/slave-data
else
if [ x$1 = x"-1" ]
then
@@ -42,8 +41,8 @@ else
else
data=var/master-data
fi
- ldata=$fix_bin/$data
fi
+ldata=$fix_bin/$data
mdata=$data/mysql
EXTRA_ARG=""
@@ -81,9 +80,7 @@ basedir=.
EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
fi
-mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables \
- --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb \
- $EXTRA_ARG"
+mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
echo "running $mysqld_boot"
if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 0087459e1dd..120ae00ca86 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -1927,6 +1927,7 @@ sub install_db ($$) {
mtr_add_arg($args, "--skip-innodb");
mtr_add_arg($args, "--skip-ndbcluster");
mtr_add_arg($args, "--skip-bdb");
+ mtr_add_arg($args, "--tmpdir=.");
if ( ! $opt_netware )
{
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index f56099f3a49..c018181b929 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -1121,7 +1121,10 @@ mysql_install_db () {
if [ ! -z "$USE_NDBCLUSTER" ]
then
$ECHO "Installing Master Databases 1"
- $INSTALL_DB -1
+# $INSTALL_DB -1
+ $RM -rf var/master-data1
+ mkdir var/master-data1
+ cp -r var/master-data/* var/master-data1
if [ $? != 0 ]; then
error "Could not install master test DBs 1"
exit 1
@@ -1129,7 +1132,9 @@ mysql_install_db () {
fi
$ECHO "Installing Slave Databases"
$RM -rf $SLAVE_MYDDIR $MY_LOG_DIR/*
- $INSTALL_DB -slave
+# $INSTALL_DB -slave
+ mkdir var/slave-data
+ cp -r var/master-data/* var/slave-data
if [ $? != 0 ]; then
error "Could not install slave test DBs"
exit 1
@@ -2158,6 +2163,7 @@ then
# Remove files that can cause problems
$RM -rf $MYSQL_TEST_DIR/var/ndbcluster
+ $RM -rf $MYSQL_TEST_DIR/var/tmp/snapshot*
$RM -f $MYSQL_TEST_DIR/var/run/* $MYSQL_TEST_DIR/var/tmp/*
# Remove old berkeley db log files that can confuse the server
diff --git a/mysql-test/r/binlog_row_mix_innodb_myisam.result b/mysql-test/r/binlog_row_mix_innodb_myisam.result
index 078a95d5abd..84959684c42 100644
--- a/mysql-test/r/binlog_row_mix_innodb_myisam.result
+++ b/mysql-test/r/binlog_row_mix_innodb_myisam.result
@@ -234,8 +234,6 @@ commit;
begin;
create temporary table ti (a int) engine=innodb;
rollback;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
insert into ti values(1);
set autocommit=0;
create temporary table t1 (a int) engine=myisam;
@@ -285,6 +283,162 @@ master-bin.000001 1260 Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 1294 Query 1 # use `test`; create table t2 (n int) engine=innodb
do release_lock("lock1");
drop table t0,t2;
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+INSERT INTO t1 values (3,3);
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+DROP TABLE IF EXISTS t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * FROM t2;
+a b
+DROP TABLE t2;
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+INSERT INTO t1 values (8,8);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t1 values (9,9);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t1;
+a b
+1 1
+1 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+INSERT INTO t2 values (100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t2 values (101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+DROP TABLE t1,t2;
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 102 Table_map 1 142 table_id: # (test.t1)
+master-bin.000001 142 Write_rows 1 189 table_id: # flags: STMT_END_F
+master-bin.000001 189 Query 1 257 use `test`; BEGIN
+master-bin.000001 257 Query 1 182 use `test`; CREATE TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 439 Table_map 1 222 table_id: # (test.t2)
+master-bin.000001 479 Write_rows 1 260 table_id: # flags: STMT_END_F
+master-bin.000001 517 Xid 1 544 COMMIT /* xid= */
+master-bin.000001 544 Query 1 630 use `test`; DROP TABLE if exists t2
+master-bin.000001 630 Table_map 1 670 table_id: # (test.t1)
+master-bin.000001 670 Write_rows 1 708 table_id: # flags: STMT_END_F
+master-bin.000001 708 Query 1 776 use `test`; BEGIN
+master-bin.000001 776 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 968 Query 1 1039 use `test`; ROLLBACK
+master-bin.000001 1039 Query 1 1125 use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 1125 Query 1 1249 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 1249 Table_map 1 1289 table_id: # (test.t1)
+master-bin.000001 1289 Write_rows 1 1327 table_id: # flags: STMT_END_F
+master-bin.000001 1327 Query 1 1395 use `test`; BEGIN
+master-bin.000001 1395 Query 1 182 use `test`; CREATE TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 1577 Table_map 1 222 table_id: # (test.t2)
+master-bin.000001 1617 Write_rows 1 260 table_id: # flags: STMT_END_F
+master-bin.000001 1655 Xid 1 1682 COMMIT /* xid= */
+master-bin.000001 1682 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 1762 Xid 1 1789 COMMIT /* xid= */
+master-bin.000001 1789 Table_map 1 1829 table_id: # (test.t1)
+master-bin.000001 1829 Write_rows 1 1867 table_id: # flags: STMT_END_F
+master-bin.000001 1867 Query 1 1935 use `test`; BEGIN
+master-bin.000001 1935 Table_map 1 40 table_id: # (test.t2)
+master-bin.000001 1975 Write_rows 1 78 table_id: # flags: STMT_END_F
+master-bin.000001 2013 Xid 1 2040 COMMIT /* xid= */
+master-bin.000001 2040 Query 1 2116 use `test`; DROP TABLE t2
+master-bin.000001 2116 Table_map 1 2156 table_id: # (test.t1)
+master-bin.000001 2156 Write_rows 1 2194 table_id: # flags: STMT_END_F
+master-bin.000001 2194 Table_map 1 2234 table_id: # (test.t1)
+master-bin.000001 2234 Write_rows 1 2272 table_id: # flags: STMT_END_F
+master-bin.000001 2272 Table_map 1 2312 table_id: # (test.t1)
+master-bin.000001 2312 Write_rows 1 2350 table_id: # flags: STMT_END_F
+master-bin.000001 2350 Query 1 2418 use `test`; BEGIN
+master-bin.000001 2418 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 2610 Xid 1 2637 COMMIT /* xid= */
+master-bin.000001 2637 Table_map 1 2677 table_id: # (test.t1)
+master-bin.000001 2677 Write_rows 1 2715 table_id: # flags: STMT_END_F
+master-bin.000001 2715 Query 1 2783 use `test`; BEGIN
+master-bin.000001 2783 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 2975 Query 1 3046 use `test`; ROLLBACK
+master-bin.000001 3046 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 3126 Xid 1 3153 COMMIT /* xid= */
+master-bin.000001 3153 Table_map 1 3193 table_id: # (test.t1)
+master-bin.000001 3193 Write_rows 1 3231 table_id: # flags: STMT_END_F
+master-bin.000001 3231 Query 1 3299 use `test`; BEGIN
+master-bin.000001 3299 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 3491 Xid 1 3518 COMMIT /* xid= */
+master-bin.000001 3518 Query 1 3622 use `test`; DROP TABLE `t1` /* generated by server */
reset master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=myisam;
diff --git a/mysql-test/r/binlog_stm_mix_innodb_myisam.result b/mysql-test/r/binlog_stm_mix_innodb_myisam.result
index c5abcff4246..e836cae0b15 100644
--- a/mysql-test/r/binlog_stm_mix_innodb_myisam.result
+++ b/mysql-test/r/binlog_stm_mix_innodb_myisam.result
@@ -209,8 +209,6 @@ commit;
begin;
create temporary table ti (a int) engine=innodb;
rollback;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
insert into ti values(1);
set autocommit=0;
create temporary table t1 (a int) engine=myisam;
@@ -256,6 +254,107 @@ master-bin.000001 1654 Query 1 # use `test`; create table t2 (n int) engine=inno
master-bin.000001 1754 Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti`
do release_lock("lock1");
drop table t0,t2;
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+INSERT INTO t1 values (3,3);
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+DROP TABLE IF EXISTS t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * FROM t2;
+a b
+DROP TABLE t2;
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+INSERT INTO t1 values (8,8);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t1 values (9,9);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t1;
+a b
+1 1
+1 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+INSERT INTO t2 values (100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t2 values (101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+DROP TABLE t1,t2;
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 102 Query 1 198 use `test`; INSERT INTO t1 values (1,1),(1,2)
+master-bin.000001 198 Query 1 284 use `test`; DROP TABLE if exists t2
+master-bin.000001 284 Query 1 374 use `test`; INSERT INTO t1 values (3,3)
+master-bin.000001 374 Query 1 460 use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 460 Query 1 584 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 584 Query 1 674 use `test`; INSERT INTO t1 VALUES (4,4)
+master-bin.000001 674 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 754 Xid 1 781 COMMIT /* xid= */
+master-bin.000001 781 Query 1 871 use `test`; INSERT INTO t1 VALUES (5,5)
+master-bin.000001 871 Query 1 947 use `test`; DROP TABLE t2
+master-bin.000001 947 Query 1 1037 use `test`; INSERT INTO t1 values (6,6)
+master-bin.000001 1037 Query 1 1171 use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 1171 Query 1 1261 use `test`; INSERT INTO t1 values (7,7)
+master-bin.000001 1261 Query 1 1351 use `test`; INSERT INTO t1 values (8,8)
+master-bin.000001 1351 Query 1 1441 use `test`; INSERT INTO t1 values (9,9)
+master-bin.000001 1441 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 1521 Xid 1 1548 COMMIT /* xid= */
+master-bin.000001 1548 Query 1 1640 use `test`; INSERT INTO t1 values (10,10)
+master-bin.000001 1640 Query 1 1708 use `test`; BEGIN
+master-bin.000001 1708 Query 1 94 use `test`; INSERT INTO t2 values (100,100)
+master-bin.000001 1802 Xid 1 1829 COMMIT /* xid= */
+master-bin.000001 1829 Query 1 1908 use `test`; DROP TABLE t1,t2
reset master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=myisam;
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index d45960cf787..6f8c319eb6f 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -266,6 +266,7 @@ select * from t1;
0 1 2
0 0 1
drop table t1;
+flush status;
create table t1 (a int not null, b int, primary key (a));
insert into t1 values (1,1);
create table if not exists t1 select 2;
@@ -281,6 +282,13 @@ Warnings:
Note 1050 Table 't1' already exists
create table if not exists t1 select 3 as 'a',3 as 'b';
ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
+show warnings;
+Level Code Message
+Note 1050 Table 't1' already exists
+Error 1062 Duplicate entry '3' for key 'PRIMARY'
+show status like "Opened_tables";
+Variable_name Value
+Opened_tables 2
select * from t1;
a b
1 1
@@ -778,3 +786,41 @@ Warnings:
Warning 1071 Specified key was too long; max key length is 765 bytes
insert into t1 values('aaa');
drop table t1;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a));
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+drop table t2;
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+drop table t1,t2;
diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result
index 5f735ebe926..b3d6e10448b 100644
--- a/mysql-test/r/federated.result
+++ b/mysql-test/r/federated.result
@@ -1601,6 +1601,7 @@ fld_cid fld_name fld_parentid fld_delt
5 Torkel 0 0
DROP TABLE federated.t1;
DROP TABLE federated.bug_17377_table;
+DROP TABLE federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result
index 62b4ddf868a..773efe50749 100644
--- a/mysql-test/r/func_gconcat.result
+++ b/mysql-test/r/func_gconcat.result
@@ -309,6 +309,12 @@ a grp
1 2
2 4,3
3 5
+select group_concat(c order by (select concat(5-t1.c,group_concat(c order by a)) from t2 where t2.a=t1.a)) as grp from t1;
+grp
+5,4,3,2
+select group_concat(c order by (select concat(t1.c,group_concat(c)) from t2 where a=t1.a)) as grp from t1;
+grp
+2,3,4,5
select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp;
a c grp
3 5 3,3
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index b5fd35d926f..b9b717cb39c 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -758,6 +758,7 @@ time_format('100:00:00', '%H %k %h %I %l')
100 100 04 04 4
create table t1 (a timestamp default '2005-05-05 01:01:01',
b timestamp default '2005-05-05 01:01:01');
+drop function if exists t_slow_sysdate;
create function t_slow_sysdate() returns timestamp
begin
do sleep(2);
diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result
index 878c5cb5451..fe9940f753f 100644
--- a/mysql-test/r/innodb_mysql.result
+++ b/mysql-test/r/innodb_mysql.result
@@ -1 +1,59 @@
-drop table if exists t1;
+drop table if exists t1.t2;
+set storage_engine=innodb;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+drop table t2;
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+COMMIT;
+BEGIN;
+INSERT INTO t2 values(101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+101 101
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+drop table t1,t2;
diff --git a/mysql-test/r/insert.result b/mysql-test/r/insert.result
index 5e9ab480558..18bd6d7e796 100644
--- a/mysql-test/r/insert.result
+++ b/mysql-test/r/insert.result
@@ -2,8 +2,8 @@ drop table if exists t1,t2,t3;
create table t1 (a int not null);
insert into t1 values (1);
insert into t1 values (a+2);
-insert into t1 values (a+3);
-insert into t1 values (4),(a+5);
+insert into t1 values (a+3),(a+4);
+insert into t1 values (5),(a+6);
select * from t1;
a
1
@@ -11,6 +11,7 @@ a
3
4
5
+6
drop table t1;
create table t1 (id int not null auto_increment primary key, username varchar(32) not null, unique (username));
insert into t1 values (0,"mysql");
@@ -299,3 +300,24 @@ select count(*) from t2;
count(*)
25500
drop table t1,t2,t3;
+create table t1 (a int, b int);
+insert into t1 (a,b) values (a,b);
+insert into t1 SET a=1, b=a+1;
+insert into t1 (a,b) select 1,2;
+INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+prepare stmt1 from ' replace into t1 (a,a) select 100, ''hundred'' ';
+execute stmt1;
+ERROR 42000: Column 'a' specified twice
+insert into t1 (a,b,b) values (1,1,1);
+ERROR 42000: Column 'b' specified twice
+insert into t1 (a,a) values (1,1,1);
+ERROR 21S01: Column count doesn't match value count at row 1
+insert into t1 (a,a) values (1,1);
+ERROR 42000: Column 'a' specified twice
+insert into t1 SET a=1,b=2,a=1;
+ERROR 42000: Column 'a' specified twice
+insert into t1 (b,b) select 1,2;
+ERROR 42000: Column 'b' specified twice
+INSERT INTO t1 (b,b) SELECT 0,0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+ERROR 42000: Column 'b' specified twice
+drop table t1;
diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result
index 0b314defece..72beee4b2e3 100644
--- a/mysql-test/r/loaddata.result
+++ b/mysql-test/r/loaddata.result
@@ -115,6 +115,15 @@ select @a, @b;
@a @b
NULL 15
truncate table t1;
+load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 set c=b;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 2 doesn't contain data for all columns
+select * from t1;
+a b c
+NULL 10 10
+NULL 15 15
+truncate table t1;
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (a, b) set c="Wow";
select * from t1;
a b c
diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result
index ea02a703c65..6fdd105fd6c 100644
--- a/mysql-test/r/multi_update.result
+++ b/mysql-test/r/multi_update.result
@@ -519,3 +519,83 @@ a
30
drop view v1;
drop table t1, t2;
+create table t1 (i1 int, i2 int, i3 int);
+create table t2 (id int, c1 varchar(20), c2 varchar(20));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from t2;
+id c1 c2
+9 abc def
+5 opq lmn
+2 test t t test
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table t1, t2;
+create table t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1));
+create table t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from t2 order by id;
+id c1 c2
+2 test t t test
+5 opq lmn
+9 abc def
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table t1, t2;
diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result
index 497ad973e8b..e18125f159e 100644
--- a/mysql-test/r/ndb_index_unique.result
+++ b/mysql-test/r/ndb_index_unique.result
@@ -144,7 +144,7 @@ b int unsigned not null,
c int unsigned,
UNIQUE USING HASH (b, c)
) engine=ndbcluster;
-ERROR 42000: Column 'c' is used with UNIQUE or INDEX but is not defined as NOT NULL
+ERROR 42000: Table handler doesn't support NULL in given index. Please change column 'c' to be NOT NULL or use another handler
CREATE TABLE t3 (
a int unsigned NOT NULL,
b int unsigned not null,
diff --git a/mysql-test/r/ndb_replace.result b/mysql-test/r/ndb_replace.result
index 5e49968ca64..cdcd935bfcc 100644
--- a/mysql-test/r/ndb_replace.result
+++ b/mysql-test/r/ndb_replace.result
@@ -30,4 +30,4 @@ REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
i j k
3 1 42
-17 2 24
+17 2 NULL
diff --git a/mysql-test/r/rpl_ddl.result b/mysql-test/r/rpl_ddl.result
index e828f2c1e31..cfa2b4dc76d 100644
--- a/mysql-test/r/rpl_ddl.result
+++ b/mysql-test/r/rpl_ddl.result
@@ -359,8 +359,6 @@ MAX(f1)
-------- switch to master -------
ROLLBACK;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
SELECT MAX(f1) FROM t1;
MAX(f1)
5
@@ -579,8 +577,6 @@ MAX(f1)
-------- switch to master -------
ROLLBACK;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
SELECT MAX(f1) FROM t1;
MAX(f1)
8
diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result
index 928e3635ec6..2a27dba6101 100644
--- a/mysql-test/r/view_grant.result
+++ b/mysql-test/r/view_grant.result
@@ -1,5 +1,5 @@
drop database if exists mysqltest;
-drop view if exists v1;
+drop view if exists v1,v2,v3;
grant create view on test.* to test@localhost;
show grants for test@localhost;
Grants for test@localhost
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index fe8cfe70c4e..e4a7d1cd9af 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -226,6 +226,7 @@ drop table t1;
# Test create table if not exists with duplicate key error
#
+flush status;
create table t1 (a int not null, b int, primary key (a));
insert into t1 values (1,1);
create table if not exists t1 select 2;
@@ -233,6 +234,8 @@ select * from t1;
create table if not exists t1 select 3 as 'a',4 as 'b';
--error 1062
create table if not exists t1 select 3 as 'a',3 as 'b';
+show warnings;
+show status like "Opened_tables";
select * from t1;
drop table t1;
@@ -676,3 +679,37 @@ insert into t1 values('aaa');
drop table t1;
# End of 5.0 tests
+
+#
+# Test of behaviour with CREATE ... SELECT
+#
+
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+CREATE TABLE t2 (a int, b int, primary key (a));
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t2;
+
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t1,t2;
diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test
index 3c43fb1d1f9..f6799f3630a 100644
--- a/mysql-test/t/federated.test
+++ b/mysql-test/t/federated.test
@@ -1,6 +1,6 @@
# should work with embedded server after mysqltest is fixed
--- source include/not_embedded.inc
-source include/federated.inc;
+--source include/not_embedded.inc
+--source include/federated.inc
connection slave;
DROP TABLE IF EXISTS federated.t1;
@@ -1310,6 +1310,57 @@ select * from federated.t1 where fld_parentid=0 and fld_delt=0;
DROP TABLE federated.t1;
connection slave;
DROP TABLE federated.bug_17377_table;
+DROP TABLE federated.t1;
+
+#
+# Test multi updates and deletes without keys
+#
+
+# The following can be enabled when bug #19773 has been fixed
+--disable_parsing
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+connection master;
+eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete t1.*,t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+connection master;
+# Test multi updates and deletes with keys
+
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+connection master;
+eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete t1.*,t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+connection master;
+--enable_parsing
-source include/federated_cleanup.inc;
+--source include/federated_cleanup.inc
diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test
index fbfdfa3b5d0..7fd7edddf28 100644
--- a/mysql-test/t/func_gconcat.test
+++ b/mysql-test/t/func_gconcat.test
@@ -176,8 +176,8 @@ select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5)
select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1;
# The following returns random results as we are sorting on blob addresses
-# select group_concat(c order by (select group_concat(c order by a) from t2 where t2.a=t1.a)) as grp from t1;
-# select group_concat(c order by (select group_concat(c) from t2 where a=t1.a)) as grp from t1;
+select group_concat(c order by (select concat(5-t1.c,group_concat(c order by a)) from t2 where t2.a=t1.a)) as grp from t1;
+select group_concat(c order by (select concat(t1.c,group_concat(c)) from t2 where a=t1.a)) as grp from t1;
select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp;
drop table t1,t2;
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index 4e4fb8f777a..229b24c73ca 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -379,6 +379,7 @@ select time_format('100:00:00', '%H %k %h %I %l');
create table t1 (a timestamp default '2005-05-05 01:01:01',
b timestamp default '2005-05-05 01:01:01');
delimiter //;
+drop function if exists t_slow_sysdate;
create function t_slow_sysdate() returns timestamp
begin
do sleep(2);
diff --git a/mysql-test/t/innodb_mysql.test b/mysql-test/t/innodb_mysql.test
index b942b9fbc0d..c0fb3ab917b 100644
--- a/mysql-test/t/innodb_mysql.test
+++ b/mysql-test/t/innodb_mysql.test
@@ -1,5 +1,54 @@
-- source include/have_innodb.inc
--disable_warnings
-drop table if exists t1;
+drop table if exists t1.t2;
--enable_warnings
+
+#
+# Test of behaviour with CREATE ... SELECT
+#
+
+set storage_engine=innodb;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+CREATE TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t2;
+
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+COMMIT;
+BEGIN;
+INSERT INTO t2 values(101,101);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t1,t2;
diff --git a/mysql-test/t/insert.test b/mysql-test/t/insert.test
index f3dd8e7e199..e2514083ea7 100644
--- a/mysql-test/t/insert.test
+++ b/mysql-test/t/insert.test
@@ -9,8 +9,8 @@ drop table if exists t1,t2,t3;
create table t1 (a int not null);
insert into t1 values (1);
insert into t1 values (a+2);
-insert into t1 values (a+3);
-insert into t1 values (4),(a+5);
+insert into t1 values (a+3),(a+4);
+insert into t1 values (5),(a+6);
select * from t1;
drop table t1;
@@ -176,3 +176,28 @@ insert into t2 select t1.* from t1, t2 t, t3 where t1.id1 = t.id2 and t.id2 =
select count(*) from t2;
drop table t1,t2,t3;
+#
+# Test different cases of duplicate fields
+#
+
+create table t1 (a int, b int);
+insert into t1 (a,b) values (a,b);
+insert into t1 SET a=1, b=a+1;
+insert into t1 (a,b) select 1,2;
+INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+prepare stmt1 from ' replace into t1 (a,a) select 100, ''hundred'' ';
+--error 1110
+execute stmt1;
+--error 1110
+insert into t1 (a,b,b) values (1,1,1);
+--error 1136
+insert into t1 (a,a) values (1,1,1);
+--error 1110
+insert into t1 (a,a) values (1,1);
+--error 1110
+insert into t1 SET a=1,b=2,a=1;
+--error 1110
+insert into t1 (b,b) select 1,2;
+--error 1110
+INSERT INTO t1 (b,b) SELECT 0,0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+drop table t1;
diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test
index fcd127e3e98..aff5efa51cb 100644
--- a/mysql-test/t/loaddata.test
+++ b/mysql-test/t/loaddata.test
@@ -92,6 +92,10 @@ load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 (@a, @b);
select * from t1;
select @a, @b;
truncate table t1;
+# Reading of all columns with set
+load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 set c=b;
+select * from t1;
+truncate table t1;
# now going to test fixed field-row file format
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (a, b) set c="Wow";
select * from t1;
diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test
index acc816ae921..04c33e9d709 100644
--- a/mysql-test/t/multi_update.test
+++ b/mysql-test/t/multi_update.test
@@ -532,3 +532,33 @@ select * from t1;
select * from t2;
drop view v1;
drop table t1, t2;
+
+#
+# Test multi updates and deletes using primary key and without.
+#
+create table t1 (i1 int, i2 int, i3 int);
+create table t2 (id int, c1 varchar(20), c2 varchar(20));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+select * from t2;
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+select * from t2 order by id;
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+select * from t2 order by id;
+drop table t1, t2;
+create table t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1));
+create table t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+select * from t2 order by id;
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+select * from t2 order by id;
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+select * from t2 order by id;
+drop table t1, t2;
diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test
index 8deff474587..e057ba537f6 100644
--- a/mysql-test/t/view_grant.test
+++ b/mysql-test/t/view_grant.test
@@ -3,7 +3,7 @@
--disable_warnings
drop database if exists mysqltest;
-drop view if exists v1;
+drop view if exists v1,v2,v3;
--enable_warnings
diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp
index 69e4ee933b8..1a08abcf953 100644
--- a/mysql-test/valgrind.supp
+++ b/mysql-test/valgrind.supp
@@ -173,3 +173,17 @@
fun:my_thread_init
fun:kill_server_thread
}
+
+#
+# Warning when printing stack trace (to suppress some not needed warnings)
+#
+
+{
+ vprintf on stacktrace
+ Memcheck:Cond
+ fun:vfprintf
+ fun:buffered_vfprintf
+ fun:vfprintf
+ fun:fprintf
+ fun:print_stacktrace
+}
diff --git a/mysys/base64.c b/mysys/base64.c
index 60218993c42..610797dd2ce 100644
--- a/mysys/base64.c
+++ b/mysys/base64.c
@@ -14,9 +14,10 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <base64.h>
+#include <my_global.h>
#include <m_string.h> /* strchr() */
#include <m_ctype.h> /* my_isspace() */
+#include <base64.h>
#ifndef MAIN
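This reordering pairs with the include/base64.h hunk earlier: the header no longer pulls in my_global.h itself, so the .c file now includes it explicitly, and first. A sketch of the convention assumed here (that my_global.h supplies the platform and config macros the other headers compile against):

  #include <my_global.h>  /* always first: portability and config macros */
  #include <m_string.h>   /* strchr() */
  #include <m_ctype.h>    /* my_isspace() */
  #include <base64.h>     /* safe anywhere now; no my_global.h inside */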
diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c
index 70edb507b5f..92b0cbb1371 100644
--- a/mysys/my_bitmap.c
+++ b/mysys/my_bitmap.c
@@ -46,8 +46,8 @@ void create_last_word_mask(MY_BITMAP *map)
unsigned int const used= 1U + ((map->n_bits-1U) & 0x7U);
/*
- * Create a mask with the upper 'unused' bits set and the lower 'used'
- * bits clear. The bits within each byte is stored in big-endian order.
+ Create a mask with the upper 'unused' bits set and the lower 'used'
+ bits clear. The bits within each byte are stored in big-endian order.
*/
unsigned char const mask= (~((1 << used) - 1)) & 255;
@@ -60,13 +60,11 @@ void create_last_word_mask(MY_BITMAP *map)
unsigned char *ptr= (unsigned char*)&map->last_word_mask;
map->last_word_ptr= map->bitmap + no_words_in_map(map)-1;
- switch (no_bytes_in_map(map)&3)
- {
+ switch (no_bytes_in_map(map) & 3) {
case 1:
map->last_word_mask= ~0U;
ptr[0]= mask;
return;
-
case 2:
map->last_word_mask= ~0U;
ptr[0]= 0;
@@ -84,6 +82,7 @@ void create_last_word_mask(MY_BITMAP *map)
}
}
+
static inline void bitmap_lock(MY_BITMAP *map __attribute__((unused)))
{
#ifdef THREAD
@@ -101,37 +100,41 @@ static inline void bitmap_unlock(MY_BITMAP *map __attribute__((unused)))
}
-my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits,
+my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits,
my_bool thread_safe)
{
DBUG_ENTER("bitmap_init");
- DBUG_ASSERT(n_bits > 0);
if (!buf)
{
- uint size_in_bytes= ((n_bits+31)/32)*4
+ uint size_in_bytes= bitmap_buffer_size(n_bits);
+ uint extra= 0;
#ifdef THREAD
- +(thread_safe ? sizeof(pthread_mutex_t) : 0)
+ if (thread_safe)
+ {
+ size_in_bytes= ALIGN_SIZE(size_in_bytes);
+ extra= sizeof(pthread_mutex_t);
+ }
+ map->mutex= 0;
#endif
- ;
- if (!(buf= (uint32*) my_malloc(size_in_bytes, MYF(MY_WME))))
+ if (!(buf= (my_bitmap_map*) my_malloc(size_in_bytes+extra, MYF(MY_WME))))
DBUG_RETURN(1);
- }
#ifdef THREAD
- else
- DBUG_ASSERT(thread_safe == 0);
+ if (thread_safe)
+ {
+ map->mutex= (pthread_mutex_t *) ((char*) buf + size_in_bytes);
+ pthread_mutex_init(map->mutex, MY_MUTEX_INIT_FAST);
+ }
#endif
+ }
#ifdef THREAD
- if (thread_safe)
+ else
{
- map->mutex=(pthread_mutex_t *)buf;
- pthread_mutex_init(map->mutex, MY_MUTEX_INIT_FAST);
- buf+= sizeof(pthread_mutex_t)/4;
+ DBUG_ASSERT(thread_safe == 0);
}
- else
- map->mutex=0;
#endif
+
map->bitmap= buf;
- map->n_bits=n_bits;
+ map->n_bits= n_bits;
create_last_word_mask(map);
bitmap_clear_all(map);
DBUG_RETURN(0);
@@ -144,15 +147,10 @@ void bitmap_free(MY_BITMAP *map)
if (map->bitmap)
{
#ifdef THREAD
- char *buf= (char *)map->mutex;
- if (buf)
+ if (map->mutex)
pthread_mutex_destroy(map->mutex);
- else
- buf=(char*) map->bitmap;
- my_free(buf, MYF(0));
-#else
- my_free((char*) map->bitmap, MYF(0));
#endif
+ my_free((char*) map->bitmap, MYF(0));
map->bitmap=0;
}
DBUG_VOID_RETURN;
@@ -205,6 +203,40 @@ my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit)
return res;
}
+/*
+ test if bit already set and clear it if it was set (thread unsafe method)
+
+ SYNOPSIS
+ bitmap_fast_test_and_clear()
+ MAP bit map struct
+ BIT bit number
+
+ RETURN
+ 0 bit was not set
+ !=0 bit was set
+*/
+
+my_bool bitmap_fast_test_and_clear(MY_BITMAP *map, uint bitmap_bit)
+{
+ uchar *byte= (uchar*) map->bitmap + (bitmap_bit / 8);
+ uchar bit= 1 << ((bitmap_bit) & 7);
+ uchar res= (*byte) & bit;
+ *byte&= ~bit;
+ return res;
+}
+
+
+my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit)
+{
+ my_bool res;
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+ bitmap_lock(map);
+ res= bitmap_fast_test_and_clear(map, bitmap_bit);
+ bitmap_unlock(map);
+ return res;
+}
+
+
uint bitmap_set_next(MY_BITMAP *map)
{
uint bit_found;
@@ -230,7 +262,6 @@ void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size)
*m++= (1 << prefix_bits)-1;
if ((d= no_bytes_in_map(map)-prefix_bytes))
bzero(m, d);
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits*/
}
@@ -247,7 +278,7 @@ my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size)
if (*m++ != 0xff)
return 0;
- *map->last_word_ptr^= map->last_word_mask; /*Clear bits*/
+ *map->last_word_ptr&= ~map->last_word_mask; /*Clear bits*/
res= 0;
if (prefix_bits && *m++ != (1 << prefix_bits)-1)
goto ret;
@@ -257,15 +288,15 @@ my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size)
goto ret;
res= 1;
ret:
- *map->last_word_ptr|= map->last_word_mask; /*Set bits again*/
return res;
}
my_bool bitmap_is_set_all(const MY_BITMAP *map)
{
- uint32 *data_ptr= map->bitmap;
- uint32 *end= map->last_word_ptr;
+ my_bitmap_map *data_ptr= map->bitmap;
+ my_bitmap_map *end= map->last_word_ptr;
+ *map->last_word_ptr |= map->last_word_mask;
for (; data_ptr <= end; data_ptr++)
if (*data_ptr != 0xFFFFFFFF)
return FALSE;
@@ -275,9 +306,9 @@ my_bool bitmap_is_set_all(const MY_BITMAP *map)
my_bool bitmap_is_clear_all(const MY_BITMAP *map)
{
- uint32 *data_ptr= map->bitmap;
- uint32 *end;
- if (*map->last_word_ptr != map->last_word_mask)
+ my_bitmap_map *data_ptr= map->bitmap;
+ my_bitmap_map *end;
+ if (*map->last_word_ptr & ~map->last_word_mask)
return FALSE;
end= map->last_word_ptr;
for (; data_ptr < end; data_ptr++)
@@ -286,16 +317,18 @@ my_bool bitmap_is_clear_all(const MY_BITMAP *map)
return TRUE;
}
+/* Return TRUE if map1 is a subset of map2 */
my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
{
- uint32 *m1= map1->bitmap, *m2= map2->bitmap, *end;
+ my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end;
DBUG_ASSERT(map1->bitmap && map2->bitmap &&
map1->n_bits==map2->n_bits);
end= map1->last_word_ptr;
-
+ *map1->last_word_ptr &= ~map1->last_word_mask;
+ *map2->last_word_ptr &= ~map2->last_word_mask;
while (m1 <= end)
{
if ((*m1++) & ~(*m2++))
@@ -304,16 +337,36 @@ my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
return 1;
}
+/* True if bitmaps have any common bits */
+
+my_bool bitmap_is_overlapping(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+ my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end;
+
+ DBUG_ASSERT(map1->bitmap && map2->bitmap &&
+ map1->n_bits==map2->n_bits);
+
+ end= map1->last_word_ptr;
+ *map1->last_word_ptr &= ~map1->last_word_mask;
+ *map2->last_word_ptr &= ~map2->last_word_mask;
+ while (m1 <= end)
+ {
+ if ((*m1++) & (*m2++))
+ return 1;
+ }
+ return 0;
+}
+
void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+ my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
uint len= no_words_in_map(map), len2 = no_words_in_map(map2);
DBUG_ASSERT(map->bitmap && map2->bitmap);
end= to+min(len,len2);
- *map2->last_word_ptr^= map2->last_word_mask; /*Clear last bits in map2*/
+ *map2->last_word_ptr&= ~map2->last_word_mask; /*Clear last bits in map2*/
while (to < end)
*to++ &= *from++;
@@ -323,8 +376,6 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
while (to < end)
*to++=0;
}
- *map2->last_word_ptr|= map2->last_word_mask; /*Set last bits in map*/
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits in map2*/
}
@@ -356,13 +407,12 @@ void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit)
while (to < end)
*to++= use_byte;
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}
void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+ my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
DBUG_ASSERT(map->bitmap && map2->bitmap &&
map->n_bits==map2->n_bits);
@@ -370,13 +420,12 @@ void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2)
while (to <= end)
*to++ &= ~(*from++);
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}
void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+ my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
DBUG_ASSERT(map->bitmap && map2->bitmap &&
map->n_bits==map2->n_bits);
@@ -389,25 +438,23 @@ void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uint32 *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr;
+ my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr;
DBUG_ASSERT(map->bitmap && map2->bitmap &&
map->n_bits==map2->n_bits);
while (to <= end)
*to++ ^= *from++;
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}
void bitmap_invert(MY_BITMAP *map)
{
- uint32 *to= map->bitmap, *end;
+ my_bitmap_map *to= map->bitmap, *end;
DBUG_ASSERT(map->bitmap);
end= map->last_word_ptr;
while (to <= end)
*to++ ^= 0xFFFFFFFF;
- *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}
@@ -418,21 +465,35 @@ uint bitmap_bits_set(const MY_BITMAP *map)
uint res= 0;
DBUG_ASSERT(map->bitmap);
- *map->last_word_ptr^=map->last_word_mask; /*Reset last bits to zero*/
+ *map->last_word_ptr&= ~map->last_word_mask; /*Reset last bits to zero*/
while (m < end)
res+= my_count_bits_ushort(*m++);
- *map->last_word_ptr^=map->last_word_mask; /*Set last bits to one again*/
return res;
}
+
+void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
+
+ DBUG_ASSERT(map->bitmap && map2->bitmap &&
+ map->n_bits==map2->n_bits);
+ end= map->last_word_ptr;
+ while (to <= end)
+ *to++ = *from++;
+}
+
+
uint bitmap_get_first_set(const MY_BITMAP *map)
{
uchar *byte_ptr;
- uint bit_found,i,j,k;
- uint32 *data_ptr, *end= map->last_word_ptr;
+ uint i,j,k;
+ my_bitmap_map *data_ptr, *end= map->last_word_ptr;
DBUG_ASSERT(map->bitmap);
data_ptr= map->bitmap;
+ *map->last_word_ptr &= ~map->last_word_mask;
+
for (i=0; data_ptr <= end; data_ptr++, i++)
{
if (*data_ptr)
@@ -445,12 +506,7 @@ uint bitmap_get_first_set(const MY_BITMAP *map)
for (k=0; ; k++)
{
if (*byte_ptr & (1 << k))
- {
- bit_found= (i*32) + (j*8) + k;
- if (bit_found == map->n_bits)
- return MY_BIT_NONE;
- return bit_found;
- }
+ return (i*32) + (j*8) + k;
}
DBUG_ASSERT(0);
}
@@ -465,11 +521,13 @@ uint bitmap_get_first_set(const MY_BITMAP *map)
uint bitmap_get_first(const MY_BITMAP *map)
{
uchar *byte_ptr;
- uint bit_found= MY_BIT_NONE, i,j,k;
- uint32 *data_ptr, *end= map->last_word_ptr;
+ uint i,j,k;
+ my_bitmap_map *data_ptr, *end= map->last_word_ptr;
DBUG_ASSERT(map->bitmap);
data_ptr= map->bitmap;
+ *map->last_word_ptr|= map->last_word_mask;
+
for (i=0; data_ptr <= end; data_ptr++, i++)
{
if (*data_ptr != 0xFFFFFFFF)
@@ -482,12 +540,7 @@ uint bitmap_get_first(const MY_BITMAP *map)
for (k=0; ; k++)
{
if (!(*byte_ptr & (1 << k)))
- {
- bit_found= (i*32) + (j*8) + k;
- if (bit_found == map->n_bits)
- return MY_BIT_NONE;
- return bit_found;
- }
+ return (i*32) + (j*8) + k;
}
DBUG_ASSERT(0);
}
@@ -705,16 +758,6 @@ void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit)
#endif
#ifdef MAIN
-static void bitmap_print(MY_BITMAP *map)
-{
- uint32 *to= map->bitmap, *end= map->last_word_ptr;
- while (to <= end)
- {
- fprintf(stderr,"0x%x ", *to++);
- }
- fprintf(stderr,"\n");
-}
-
uint get_rand_bit(uint bitsize)
{
return (rand() % bitsize);
@@ -766,7 +809,8 @@ error2:
return TRUE;
}
-bool test_operators(MY_BITMAP *map, uint bitsize)
+bool test_operators(MY_BITMAP *map __attribute__((unused)),
+ uint bitsize __attribute__((unused)))
{
return FALSE;
}
@@ -819,8 +863,8 @@ bool test_compare_operators(MY_BITMAP *map, uint bitsize)
uint no_loops= bitsize > 128 ? 128 : bitsize;
MY_BITMAP map2_obj, map3_obj;
MY_BITMAP *map2= &map2_obj, *map3= &map3_obj;
- uint32 map2buf[1024];
- uint32 map3buf[1024];
+ my_bitmap_map map2buf[1024];
+ my_bitmap_map map3buf[1024];
bitmap_init(&map2_obj, map2buf, bitsize, FALSE);
bitmap_init(&map3_obj, map3buf, bitsize, FALSE);
bitmap_clear_all(map2);
@@ -947,7 +991,7 @@ error2:
bool test_get_first_bit(MY_BITMAP *map, uint bitsize)
{
- uint i, j, test_bit;
+ uint i, test_bit;
uint no_loops= bitsize > 128 ? 128 : bitsize;
for (i=0; i < no_loops; i++)
{
@@ -1027,7 +1071,7 @@ error3:
bool do_test(uint bitsize)
{
MY_BITMAP map;
- uint32 buf[1024];
+ my_bitmap_map buf[1024];
if (bitmap_init(&map, buf, bitsize, FALSE))
{
printf("init error for bitsize %d", bitsize);
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index 4b3e03750c8..b4de57229bf 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -480,7 +480,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
data->type=lock_type;
data->owner= owner; /* Must be reset ! */
VOID(pthread_mutex_lock(&lock->mutex));
- DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d",
+ DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx type: %d",
data, data->owner->info->thread_id,
lock, (int) lock_type));
check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ?
@@ -499,7 +499,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
and the read lock is not TL_READ_NO_INSERT
*/
- DBUG_PRINT("lock",("write locked by thread: %ld",
+ DBUG_PRINT("lock",("write locked by thread: 0x%lx",
lock->write.data->owner->info->thread_id));
if (thr_lock_owner_equal(data->owner, lock->write.data->owner) ||
(lock->write.data->type <= TL_WRITE_DELAYED &&
@@ -621,7 +621,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
statistic_increment(locks_immediate,&THR_LOCK_lock);
goto end;
}
- DBUG_PRINT("lock",("write locked by thread: %ld",
+ DBUG_PRINT("lock",("write locked by thread: 0x%lx",
lock->write.data->owner->info->thread_id));
}
else
@@ -657,7 +657,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
goto end;
}
}
- DBUG_PRINT("lock",("write locked by thread: %ld, type: %ld",
+ DBUG_PRINT("lock",("write locked by thread: 0x%lx, type: %ld",
lock->read.data->owner->info->thread_id, data->type));
}
wait_queue= &lock->write_wait;
@@ -719,7 +719,7 @@ static inline void free_all_read_locks(THR_LOCK *lock,
}
lock->read_no_write_count++;
}
- DBUG_PRINT("lock",("giving read lock to thread: %ld",
+ DBUG_PRINT("lock",("giving read lock to thread: 0x%lx",
data->owner->info->thread_id));
data->cond=0; /* Mark thread free */
VOID(pthread_cond_signal(cond));
@@ -737,7 +737,7 @@ void thr_unlock(THR_LOCK_DATA *data)
THR_LOCK *lock=data->lock;
enum thr_lock_type lock_type=data->type;
DBUG_ENTER("thr_unlock");
- DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx",
+ DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx",
data, data->owner->info->thread_id, lock));
pthread_mutex_lock(&lock->mutex);
check_locks(lock,"start of release lock",0);
@@ -797,7 +797,7 @@ void thr_unlock(THR_LOCK_DATA *data)
if (data->type == TL_WRITE_CONCURRENT_INSERT &&
(*lock->check_status)(data->status_param))
data->type=TL_WRITE; /* Upgrade lock */
- DBUG_PRINT("lock",("giving write lock of type %d to thread: %ld",
+ DBUG_PRINT("lock",("giving write lock of type %d to thread: 0x%lx",
data->type, data->owner->info->thread_id));
{
pthread_cond_t *cond=data->cond;
diff --git a/sql/event.cc b/sql/event.cc
index 4a3c6aad30c..c049fdc6c61 100644
--- a/sql/event.cc
+++ b/sql/event.cc
@@ -32,8 +32,8 @@
should be replicated as disabled. If an event is ALTERed as DISABLED the
query should go untouched into the binary log, when ALTERed as enable then
it should go as SLAVESIDE_DISABLED. This is regarding the SQL interface.
- TT routines however modify mysql.event internally and this does not go the log
- so in this case queries has to be injected into the log...somehow... or
+ TT routines however modify mysql.event internally and this does not go to the
+ log, so in this case queries have to be injected into the log...somehow... or
maybe a solution is RBR for this case, because the event may go only from
ENABLED to DISABLED status change and this is safe for replicating. As well
an event may be deleted which is also safe for RBR.
@@ -56,9 +56,9 @@
- Move comparison code to class Event_timed
Warning:
- - For now parallel execution is not possible because the same sp_head cannot be
- executed few times!!! There is still no lock attached to particular event.
-
+ - For now parallel execution is not possible because the same sp_head cannot
+ be executed several times!!! There is still no lock attached to a
+ particular event.
*/
@@ -466,10 +466,10 @@ common_1_lev_code:
Open mysql.event table for read
SYNOPSIS
- evex_open_event_table_for_read()
- thd Thread context
- lock_type How to lock the table
- table The table pointer
+ evex_open_event_table()
+ thd Thread context
+ lock_type How to lock the table
+ table We will store the open table here
RETURN
1 Cannot lock table
@@ -499,7 +499,7 @@ evex_open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table)
DBUG_RETURN(2);
}
*table= tables.table;
-
+ tables.table->use_all_columns();
DBUG_RETURN(0);
}
@@ -570,10 +570,12 @@ evex_db_find_event_by_name(THD *thd, const LEX_STRING dbname,
table->field[EVEX_FIELD_DEFINER]->store(user_name.str, user_name.length,
&my_charset_bin);
- key_copy(key, table->record[0], table->key_info, table->key_info->key_length);
+ key_copy(key, table->record[0], table->key_info,
+ table->key_info->key_length);
if (table->file->index_read_idx(table->record[0], 0, key,
- table->key_info->key_length,HA_READ_KEY_EXACT))
+ table->key_info->key_length,
+ HA_READ_KEY_EXACT))
DBUG_RETURN(EVEX_KEY_NOT_FOUND);
DBUG_RETURN(0);
@@ -755,7 +757,8 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not,
}
DBUG_PRINT("info", ("non-existant, go forward"));
- if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0,
+ &dbchanged)))
{
my_error(ER_BAD_DB_ERROR, MYF(0));
goto err;
@@ -1471,7 +1474,9 @@ evex_drop_db_events(THD *thd, char *db)
if ((ret= evex_open_event_table(thd, TL_WRITE, &table)))
{
- sql_print_error("Table mysql.event is damaged.");
+ if (my_errno != ENOENT)
+ sql_print_error("Table mysql.event is damaged. Got errno: %d on open",
+ my_errno);
VOID(pthread_mutex_unlock(&LOCK_event_arrays));
DBUG_RETURN(SP_OPEN_TABLE_FAILED);
}
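[On the last event.cc hunk: an open failure is now reported as table damage only when the error is something other than a missing table. A minimal standalone sketch of that policy follows; open_event_table() is a hypothetical stand-in, not the real evex_open_event_table() API.]

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in: fails as if the table file were absent */
    static int open_event_table(void)
    {
      errno= ENOENT;
      return 1;
    }

    int main(void)
    {
      if (open_event_table())
      {
        if (errno != ENOENT)              /* missing table: stay silent */
          fprintf(stderr, "Table mysql.event is damaged. Got errno: %d\n",
                  errno);
        return 1;                         /* caller sees the open failure */
      }
      return 0;
    }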
diff --git a/sql/field.cc b/sql/field.cc
index 9c504f186b3..4818ad47151 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -50,6 +50,9 @@ const char field_separator=',';
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
+#define ASSERT_COLUMN_MARKED_FOR_READ \
+  DBUG_ASSERT(!table->read_set || bitmap_is_set(table->read_set, field_index))
+#define ASSERT_COLUMN_MARKED_FOR_WRITE \
+  DBUG_ASSERT(!table->write_set || bitmap_is_set(table->write_set, field_index))
+
/*
Rules for merging different types of fields in UNION
@@ -1201,9 +1204,11 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs)
String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length= 21;
longlong value= val_int();
+
if (val_buffer->alloc(length))
return 0;
length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(),
@@ -1221,16 +1226,17 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
:ptr(ptr_arg),null_ptr(null_ptr_arg),
table(0), orig_table(0), table_name(0),
field_name(field_name_arg),
- query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
- unireg_check(unireg_check_arg),
+ key_start(0), part_of_key(0), part_of_key_not_clustered(0),
+ part_of_sortkey(0), unireg_check(unireg_check_arg),
field_length(length_arg),null_bit(null_bit_arg)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
comment.length=0;
- fieldnr= 0;
+ field_index= 0;
}
+
uint Field::offset()
{
return (uint) (ptr - (char*) table->record[0]);
@@ -1354,6 +1360,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
int Field_num::store_decimal(const my_decimal *val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err= 0;
longlong i= convert_decimal2longlong(val, unsigned_flag, &err);
return test(err | store(i, unsigned_flag));
@@ -1378,6 +1385,7 @@ int Field_num::store_decimal(const my_decimal *val)
my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
DBUG_ASSERT(result_type() == INT_RESULT);
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value);
@@ -1423,6 +1431,7 @@ void Field_num::make_field(Send_field *field)
int Field_str::store_decimal(const my_decimal *d)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
double val;
/* TODO: use decimal2string? */
int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR &
@@ -1433,6 +1442,7 @@ int Field_str::store_decimal(const my_decimal *d)
my_decimal *Field_str::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, 0, decimal_value);
return decimal_value;
@@ -1498,6 +1508,7 @@ bool Field::get_time(TIME *ltime)
int Field::store_time(TIME *ltime, timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= (uint) my_TIME_to_str(ltime, buff);
return store(buff, length, &my_charset_bin);
@@ -1720,6 +1731,7 @@ void Field_decimal::overflow(bool negative)
int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff,sizeof(buff), &my_charset_bin);
@@ -2089,6 +2101,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
int Field_decimal::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
if (unsigned_flag && nr < 0)
{
overflow(1);
@@ -2134,6 +2147,7 @@ int Field_decimal::store(double nr)
int Field_decimal::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[22];
uint length, int_part;
char fyllchar, *to;
@@ -2168,6 +2182,7 @@ int Field_decimal::store(longlong nr, bool unsigned_val)
double Field_decimal::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
return my_strntod(&my_charset_bin, ptr, field_length, &end_not_used,
@@ -2176,6 +2191,7 @@ double Field_decimal::val_real(void)
longlong Field_decimal::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
if (unsigned_flag)
return my_strntoull(&my_charset_bin, ptr, field_length, 10, NULL,
@@ -2189,6 +2205,7 @@ longlong Field_decimal::val_int(void)
String *Field_decimal::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char *str;
for (str=ptr ; *str == ' ' ; str++) ;
uint tmp_length=(uint) (str-ptr);
@@ -2365,6 +2382,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
bool Field_new_decimal::store_value(const my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
DBUG_ENTER("Field_new_decimal::store_value");
#ifndef DBUG_OFF
@@ -2409,6 +2427,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
int Field_new_decimal::store(const char *from, uint length,
CHARSET_INFO *charset)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err;
my_decimal decimal_value;
DBUG_ENTER("Field_new_decimal::store(char*)");
@@ -2456,6 +2475,7 @@ int Field_new_decimal::store(const char *from, uint length,
int Field_new_decimal::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
my_decimal decimal_value;
int err;
DBUG_ENTER("Field_new_decimal::store(double)");
@@ -2490,6 +2510,7 @@ int Field_new_decimal::store(double nr)
int Field_new_decimal::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
my_decimal decimal_value;
int err;
@@ -2511,12 +2532,14 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
int Field_new_decimal::store_decimal(const my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
return store_value(decimal_value);
}
double Field_new_decimal::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double dbl;
my_decimal decimal_value;
my_decimal2double(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), &dbl);
@@ -2526,6 +2549,7 @@ double Field_new_decimal::val_real(void)
longlong Field_new_decimal::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong i;
my_decimal decimal_value;
my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
@@ -2536,6 +2560,7 @@ longlong Field_new_decimal::val_int(void)
my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
DBUG_ENTER("Field_new_decimal::val_decimal");
binary2my_decimal(E_DEC_FATAL_ERROR, ptr, decimal_value,
precision, dec);
@@ -2548,6 +2573,7 @@ my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
String *Field_new_decimal::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
my_decimal decimal_value;
uint fixed_precision= zerofill ? precision : 0;
my_decimal2string(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
@@ -2584,6 +2610,7 @@ void Field_new_decimal::sql_type(String &str) const
int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
@@ -2630,6 +2657,7 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_tiny::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -2672,6 +2700,7 @@ int Field_tiny::store(double nr)
int Field_tiny::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (unsigned_flag)
@@ -2716,6 +2745,7 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
double Field_tiny::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
(int) ((signed char*) ptr)[0];
return (double) tmp;
@@ -2724,6 +2754,7 @@ double Field_tiny::val_real(void)
longlong Field_tiny::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
(int) ((signed char*) ptr)[0];
return (longlong) tmp;
@@ -2733,6 +2764,7 @@ longlong Field_tiny::val_int(void)
String *Field_tiny::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,5*cs->mbmaxlen);
@@ -2788,6 +2820,7 @@ void Field_tiny::sql_type(String &res) const
int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
@@ -2841,6 +2874,7 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_short::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int16 res;
nr=rint(nr);
@@ -2892,6 +2926,7 @@ int Field_short::store(double nr)
int Field_short::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int16 res;
@@ -2946,6 +2981,7 @@ int Field_short::store(longlong nr, bool unsigned_val)
double Field_short::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
short j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -2958,6 +2994,7 @@ double Field_short::val_real(void)
longlong Field_short::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
short j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -2972,6 +3009,7 @@ longlong Field_short::val_int(void)
String *Field_short::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,7*cs->mbmaxlen);
@@ -3062,6 +3100,7 @@ void Field_short::sql_type(String &res) const
int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
@@ -3109,6 +3148,7 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_medium::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -3154,6 +3194,7 @@ int Field_medium::store(double nr)
int Field_medium::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (unsigned_flag)
@@ -3202,6 +3243,7 @@ int Field_medium::store(longlong nr, bool unsigned_val)
double Field_medium::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (double) j;
}
@@ -3209,6 +3251,7 @@ double Field_medium::val_real(void)
longlong Field_medium::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (longlong) j;
}
@@ -3217,6 +3260,7 @@ longlong Field_medium::val_int(void)
String *Field_medium::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,10*cs->mbmaxlen);
@@ -3234,6 +3278,7 @@ String *Field_medium::val_str(String *val_buffer,
bool Field_medium::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_long(Field_medium::val_int());
}
@@ -3298,6 +3343,7 @@ static bool test_if_minus(CHARSET_INFO *cs,
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
ulong tmp_scan;
longlong tmp;
long store_tmp;
@@ -3370,6 +3416,7 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_long::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int32 res;
nr=rint(nr);
@@ -3421,6 +3468,7 @@ int Field_long::store(double nr)
int Field_long::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int32 res;
DBUG_ASSERT(table->in_use == current_thd); // General safety
@@ -3474,6 +3522,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
double Field_long::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3486,6 +3535,7 @@ double Field_long::val_real(void)
longlong Field_long::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
/* See the comment in Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
@@ -3501,6 +3551,7 @@ longlong Field_long::val_int(void)
String *Field_long::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,12*cs->mbmaxlen);
@@ -3527,6 +3578,7 @@ String *Field_long::val_str(String *val_buffer,
bool Field_long::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_long(Field_long::val_int());
}
@@ -3591,6 +3643,7 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
longlong tmp;
int error= 0;
char *end;
@@ -3632,6 +3685,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_longlong::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
longlong res;
@@ -3683,6 +3737,7 @@ int Field_longlong::store(double nr)
int Field_longlong::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (nr < 0) // Only possible error
@@ -3713,6 +3768,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
double Field_longlong::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3734,6 +3790,7 @@ double Field_longlong::val_real(void)
longlong Field_longlong::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3772,6 +3829,7 @@ String *Field_longlong::val_str(String *val_buffer,
bool Field_longlong::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_longlong(Field_longlong::val_int(), unsigned_flag);
}
@@ -3864,6 +3922,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_float::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
float j;
int error= 0;
@@ -3928,12 +3987,14 @@ int Field_float::store(double nr)
int Field_float::store(longlong nr, bool unsigned_val)
{
- return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
+ return Field_float::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
+ (double) nr);
}
double Field_float::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
float j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3964,6 +4025,7 @@ longlong Field_float::val_int(void)
String *Field_float::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
float nr;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4109,6 +4171,7 @@ void Field_float::sort_string(char *to,uint length __attribute__((unused)))
bool Field_float::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store((float) Field_float::val_real(), dec, (String*) 0);
}
@@ -4152,6 +4215,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_double::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (isnan(nr))
@@ -4209,7 +4273,8 @@ int Field_double::store(double nr)
int Field_double::store(longlong nr, bool unsigned_val)
{
- return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
+ return Field_double::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
+ (double) nr);
}
@@ -4222,6 +4287,7 @@ int Field_real::store_decimal(const my_decimal *dm)
double Field_double::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4236,6 +4302,7 @@ double Field_double::val_real(void)
longlong Field_double::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double j;
longlong res;
#ifdef WORDS_BIGENDIAN
@@ -4275,6 +4342,7 @@ warn:
my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double2my_decimal(E_DEC_FATAL_ERROR, val_real(), decimal_value);
return decimal_value;
}
@@ -4283,6 +4351,7 @@ my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
String *Field_double::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double nr;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4371,6 +4440,7 @@ bool Field_double::send_binary(Protocol *protocol)
int Field_double::cmp(const char *a_ptr, const char *b_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double a,b;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4529,6 +4599,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
my_time_t tmp= 0;
int error;
@@ -4599,6 +4670,7 @@ int Field_timestamp::store(double nr)
int Field_timestamp::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
my_time_t timestamp= 0;
int error;
@@ -4650,11 +4722,13 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
double Field_timestamp::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (double) Field_timestamp::val_int();
}
longlong Field_timestamp::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 temp;
TIME time_tmp;
THD *thd= table->in_use;
@@ -4680,6 +4754,7 @@ longlong Field_timestamp::val_int(void)
String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 temp, temp2;
TIME time_tmp;
THD *thd= table->in_use;
@@ -4909,6 +4984,7 @@ int Field_time::store_time(TIME *ltime, timestamp_type type)
int Field_time::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (nr > 8385959.0)
@@ -4946,6 +5022,7 @@ int Field_time::store(double nr)
int Field_time::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (nr < (longlong) -8385959L && !unsigned_val)
@@ -4983,12 +5060,14 @@ int Field_time::store(longlong nr, bool unsigned_val)
double Field_time::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 j= (uint32) uint3korr(ptr);
return (double) j;
}
longlong Field_time::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (longlong) sint3korr(ptr);
}
@@ -5001,6 +5080,7 @@ longlong Field_time::val_int(void)
String *Field_time::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
TIME ltime;
val_buffer->alloc(19);
long tmp=(long) sint3korr(ptr);
@@ -5110,6 +5190,7 @@ void Field_time::sql_type(String &res) const
int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char *end;
int error;
long nr= my_strntol(cs, from, len, 10, &end, &error);
@@ -5148,6 +5229,7 @@ int Field_year::store(double nr)
int Field_year::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155)
{
*ptr= 0;
@@ -5168,6 +5250,7 @@ int Field_year::store(longlong nr, bool unsigned_val)
bool Field_year::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulonglong tmp= Field_year::val_int();
return protocol->store_short(tmp);
}
@@ -5181,6 +5264,7 @@ double Field_year::val_real(void)
longlong Field_year::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= (int) ((uchar*) ptr)[0];
if (field_length != 4)
tmp%=100; // Return last 2 char
@@ -5218,6 +5302,7 @@ void Field_year::sql_type(String &res) const
int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
uint32 tmp;
int error;
@@ -5273,6 +5358,7 @@ int Field_date::store(double nr)
int Field_date::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME not_used;
int error;
longlong initial_nr= nr;
@@ -5323,6 +5409,7 @@ bool Field_date::send_binary(Protocol *protocol)
double Field_date::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -5336,6 +5423,7 @@ double Field_date::val_real(void)
longlong Field_date::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -5350,6 +5438,7 @@ longlong Field_date::val_int(void)
String *Field_date::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
TIME ltime;
val_buffer->alloc(field_length);
int32 tmp;
@@ -5421,6 +5510,7 @@ void Field_date::sql_type(String &res) const
int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
long tmp;
int error;
@@ -5461,6 +5551,7 @@ int Field_newdate::store(double nr)
int Field_newdate::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
longlong tmp;
int error;
@@ -5489,6 +5580,7 @@ int Field_newdate::store(longlong nr, bool unsigned_val)
int Field_newdate::store_time(TIME *ltime,timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
@@ -5514,12 +5606,14 @@ bool Field_newdate::send_binary(Protocol *protocol)
double Field_newdate::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (double) Field_newdate::val_int();
}
longlong Field_newdate::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulong j= uint3korr(ptr);
j= (j % 32L)+(j / 32L % 16L)*100L + (j/(16L*32L))*10000L;
return (longlong) j;
@@ -5529,6 +5623,7 @@ longlong Field_newdate::val_int(void)
String *Field_newdate::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
val_buffer->alloc(field_length);
val_buffer->length(field_length);
uint32 tmp=(uint32) uint3korr(ptr);
@@ -5605,6 +5700,7 @@ void Field_newdate::sql_type(String &res) const
int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME time_tmp;
int error;
ulonglong tmp= 0;
@@ -5656,6 +5752,7 @@ int Field_datetime::store(double nr)
int Field_datetime::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME not_used;
int error;
longlong initial_nr= nr;
@@ -5692,6 +5789,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val)
int Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
longlong tmp;
int error= 0;
/*
@@ -5733,6 +5831,7 @@ double Field_datetime::val_real(void)
longlong Field_datetime::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -5747,6 +5846,7 @@ longlong Field_datetime::val_int(void)
String *Field_datetime::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
val_buffer->alloc(field_length);
val_buffer->length(field_length);
ulonglong tmp;
@@ -5878,6 +5978,7 @@ void Field_datetime::sql_type(String &res) const
int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0, well_formed_error;
uint32 not_used;
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -5954,6 +6055,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_str::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
uint length;
bool use_scientific_notation= TRUE;
@@ -6029,6 +6131,7 @@ int Field_longstr::store_decimal(const my_decimal *d)
double Field_string::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
CHARSET_INFO *cs= charset();
@@ -6038,6 +6141,7 @@ double Field_string::val_real(void)
longlong Field_string::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
CHARSET_INFO *cs=charset();
@@ -6048,6 +6152,7 @@ longlong Field_string::val_int(void)
String *Field_string::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= field_charset->cset->lengthsp(field_charset, ptr, field_length);
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
@@ -6058,6 +6163,7 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(),
decimal_value);
return decimal_value;
@@ -6295,6 +6401,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
uint32 not_used, copy_length;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmpstr(buff,sizeof(buff), &my_charset_bin);
@@ -6369,6 +6476,7 @@ int Field_varstring::store(longlong nr, bool unsigned_val)
double Field_varstring::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6379,6 +6487,7 @@ double Field_varstring::val_real(void)
longlong Field_varstring::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6389,6 +6498,7 @@ longlong Field_varstring::val_int(void)
String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
val_ptr->set((const char*) ptr+length_bytes, length, field_charset);
return val_ptr;
@@ -6397,6 +6507,7 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
str2my_decimal(E_DEC_FATAL_ERROR, ptr+length_bytes, length, charset(),
decimal_value);
@@ -6937,6 +7048,7 @@ void Field_blob::put_length(char *pos, uint32 length)
int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0, well_formed_error;
if (!length)
{
@@ -7023,6 +7135,7 @@ int Field_blob::store(longlong nr, bool unsigned_val)
double Field_blob::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used, *blob;
uint32 length;
@@ -7039,6 +7152,7 @@ double Field_blob::val_real(void)
longlong Field_blob::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *blob;
memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
@@ -7051,6 +7165,7 @@ longlong Field_blob::val_int(void)
String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char *blob;
memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
if (!blob)
@@ -7063,6 +7178,7 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
const char *blob;
memcpy_fixed(&blob, ptr+packlength, sizeof(const char*));
if (!blob)
@@ -7631,6 +7747,7 @@ void Field_enum::store_type(ulonglong value)
int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err= 0;
uint32 not_used;
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -7677,6 +7794,7 @@ int Field_enum::store(double nr)
int Field_enum::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if ((ulonglong) nr > typelib->count || nr == 0)
{
@@ -7697,44 +7815,45 @@ double Field_enum::val_real(void)
longlong Field_enum::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
switch (packlength) {
case 1:
return (longlong) (uchar) ptr[0];
case 2:
- {
- uint16 tmp;
+ {
+ uint16 tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=sint2korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=sint2korr(ptr);
+ else
#endif
- shortget(tmp,ptr);
- return (longlong) tmp;
- }
+ shortget(tmp,ptr);
+ return (longlong) tmp;
+ }
case 3:
return (longlong) uint3korr(ptr);
case 4:
- {
- uint32 tmp;
+ {
+ uint32 tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=uint4korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=uint4korr(ptr);
+ else
#endif
- longget(tmp,ptr);
- return (longlong) tmp;
- }
+ longget(tmp,ptr);
+ return (longlong) tmp;
+ }
case 8:
- {
- longlong tmp;
+ {
+ longlong tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=sint8korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=sint8korr(ptr);
+ else
#endif
- longlongget(tmp,ptr);
- return tmp;
- }
+ longlongget(tmp,ptr);
+ return tmp;
+ }
}
return 0; // impossible
}
@@ -7812,6 +7931,7 @@ void Field_enum::sql_type(String &res) const
int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
bool got_warning= 0;
int err= 0;
char *not_used;
@@ -7851,6 +7971,7 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_set::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if ((ulonglong) nr > (ulonglong) (((longlong) 1 << typelib->count) -
(longlong) 1))
@@ -8034,6 +8155,7 @@ Field *Field_bit::new_key_field(MEM_ROOT *root,
int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int delta;
for (; length && !*from; from++, length--); // skip left 0's
@@ -8080,7 +8202,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
int Field_bit::store(double nr)
{
- return store((longlong) nr, FALSE);
+ return Field_bit::store((longlong) nr, FALSE);
}
@@ -8109,6 +8231,7 @@ double Field_bit::val_real(void)
longlong Field_bit::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulonglong bits= 0;
if (bit_len)
{
@@ -8133,6 +8256,7 @@ longlong Field_bit::val_int(void)
String *Field_bit::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char buff[sizeof(longlong)];
uint length= min(pack_length(), sizeof(longlong));
ulonglong bits= val_int();
@@ -8148,6 +8272,7 @@ String *Field_bit::val_str(String *val_buffer,
my_decimal *Field_bit::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int2my_decimal(E_DEC_FATAL_ERROR, val_int(), 1, decimal_value);
return decimal_value;
}
@@ -8277,6 +8402,7 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int delta;
uchar bits= field_length & 7;
@@ -8839,15 +8965,14 @@ Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
null_bit= ((uchar) 1) << null_bit;
}
- switch (field_type)
- {
- case FIELD_TYPE_DATE:
- case FIELD_TYPE_NEWDATE:
- case FIELD_TYPE_TIME:
- case FIELD_TYPE_DATETIME:
- case FIELD_TYPE_TIMESTAMP:
- field_charset= &my_charset_bin;
- default: break;
+ switch (field_type) {
+ case FIELD_TYPE_DATE:
+ case FIELD_TYPE_NEWDATE:
+ case FIELD_TYPE_TIME:
+ case FIELD_TYPE_DATETIME:
+ case FIELD_TYPE_TIMESTAMP:
+ field_charset= &my_charset_bin;
+ default: break;
}
if (f_is_alpha(pack_flag))
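[The bulk of the field.cc changes wire every store()/val_*() method to the two guard macros defined at the top of the file: a column may only be read or written if its bit is set in the table's read_set or write_set. A standalone C sketch of the idea, using hypothetical types and a single-word bitmap in place of MY_BITMAP:]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical single-word stand-ins for TABLE and Field */
    struct table { uint32_t read_set, write_set; };
    struct field { struct table *table; unsigned field_index; int value; };

    #define ASSERT_MARKED_FOR_READ(f) \
      assert((f)->table->read_set & (1U << (f)->field_index))

    static int field_val_int(struct field *f)
    {
      ASSERT_MARKED_FOR_READ(f);   /* fires if the bit was never registered */
      return f->value;
    }

    int main(void)
    {
      struct table t= { 1U << 2, 0 };       /* only column 2 is readable */
      struct field f= { &t, 2, 42 };
      printf("%d\n", field_val_int(&f));    /* ok: bit 2 is set */
      return 0;
    }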
diff --git a/sql/field.h b/sql/field.h
index b473100eaab..2ac7ec2c69d 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -62,10 +62,9 @@ public:
struct st_table *orig_table; // Pointer to original table
const char **table_name, *field_name;
LEX_STRING comment;
- query_id_t query_id; // For quick test of used fields
- bool add_index; // For check if field will be indexed
/* Field is part of the following keys */
- key_map key_start,part_of_key,part_of_sortkey;
+ key_map key_start, part_of_key, part_of_key_not_clustered;
+ key_map part_of_sortkey;
/*
We use three additional unireg types for TIMESTAMP to overcome limitation
of current binary format of .frm file. We'd like to be able to support
@@ -88,12 +87,8 @@ public:
utype unireg_check;
uint32 field_length; // Length of field
- uint field_index; // field number in fields array
uint32 flags;
- /* fieldnr is the id of the field (first field = 1) as is also
- used in key_part.
- */
- uint16 fieldnr;
+ uint16 field_index; // field number in fields array
uchar null_bit; // Bit used to test null bit
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 42d25dbbaee..38c7910be91 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -44,6 +44,7 @@ static ha_rows find_all_keys(SORTPARAM *param,SQL_SELECT *select,
static int write_keys(SORTPARAM *param,uchar * *sort_keys,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
static void make_sortkey(SORTPARAM *param,uchar *to, byte *ref_pos);
+static void register_used_fields(SORTPARAM *param);
static int merge_index(SORTPARAM *param,uchar *sort_buffer,
BUFFPEK *buffpek,
uint maxbuffer,IO_CACHE *tempfile,
@@ -66,11 +67,11 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
table Table to sort
sortorder How to sort the table
s_length Number of elements in sortorder
- select condition to apply to the rows
- special Not used.
- (This could be used to sort the rows pointed on by
- select->file)
- examined_rows Store number of examined rows here
+ select Condition to apply to the rows
+ max_rows Return only this many rows
+ sort_positions Set to 1 if we want to force sorting by position
+ (Needed by UPDATE/INSERT or ALTER TABLE)
+ examined_rows Store number of examined rows here
IMPLEMENTATION
Creates a set of pointers that can be used to read the rows
@@ -81,6 +82,10 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
Before calling filesort, one must have done
table->file->info(HA_STATUS_VARIABLE)
+ NOTES
+ If we sort by position (i.e. if sort_positions is 1) filesort() will
+ call table->prepare_for_position().
+
RETURN
HA_POS_ERROR Error
# Number of rows
@@ -92,7 +97,8 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
*/
ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows)
+ SQL_SELECT *select, ha_rows max_rows,
+ bool sort_positions, ha_rows *examined_rows)
{
int error;
ulong memavl, min_sort_memory;
@@ -128,8 +134,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.ref_length= table->file->ref_length;
param.addon_field= 0;
param.addon_length= 0;
- if (!(table->file->table_flags() & HA_FAST_KEY_READ) &&
- !table->fulltext_searched)
+ if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
+ !table->fulltext_searched && !sort_positions)
{
/*
Get the descriptors of all fields whose values are appended
@@ -175,7 +181,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
if (select && select->quick && select->quick->records > 0L)
{
records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
- table->file->records)+EXTRA_RECORDS;
+ table->file->stats.records)+EXTRA_RECORDS;
selected_records_file=0;
}
else
@@ -404,8 +410,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
TABLE *sort_form;
volatile THD::killed_state *killed= &current_thd->killed;
handler *file;
+ MY_BITMAP *save_read_set, *save_write_set;
DBUG_ENTER("find_all_keys");
- DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row")));
+ DBUG_PRINT("info",("using: %s",
+ (select ? select->quick ? "ranges" : "where":
+ "every row")));
idx=indexpos=0;
error=quick_select=0;
@@ -415,7 +424,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
ref_pos= ref_buff;
quick_select=select && select->quick;
record=0;
- flag= ((!indexfile && file->table_flags() & HA_REC_NOT_IN_SEQ)
+ flag= ((!indexfile && file->ha_table_flags() & HA_REC_NOT_IN_SEQ)
|| quick_select);
if (indexfile || flag)
ref_pos= &file->ref[0];
@@ -437,6 +446,19 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
select, 1, 1);
}
+ /* Remember original bitmaps */
+ save_read_set= sort_form->read_set;
+ save_write_set= sort_form->write_set;
+ /* Set up temporary column read map for columns used by sort */
+ bitmap_clear_all(&sort_form->tmp_set);
+ /* Temporary set for register_used_fields and register_field_in_read_map */
+ sort_form->read_set= &sort_form->tmp_set;
+ register_used_fields(param);
+ if (select && select->cond)
+ select->cond->walk(&Item::register_field_in_read_map, 1,
+ (byte*) sort_form);
+ sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set);
+
for (;;)
{
if (quick_select)
@@ -515,6 +537,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
file->ha_rnd_end();
}
+ /* Signal we should use original column read and write maps */
+ sort_form->column_bitmaps_set(save_read_set, save_write_set);
+
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{
@@ -845,6 +870,50 @@ static void make_sortkey(register SORTPARAM *param,
return;
}
+
+/*
+ Register fields used by sorting in the sorted table's read set
+*/
+
+static void register_used_fields(SORTPARAM *param)
+{
+ reg1 SORT_FIELD *sort_field;
+ reg5 uint length;
+ TABLE *table=param->sort_form;
+ MY_BITMAP *bitmap= table->read_set;
+
+ for (sort_field= param->local_sortorder ;
+ sort_field != param->end ;
+ sort_field++)
+ {
+ Field *field;
+ if ((field= sort_field->field))
+ {
+ if (field->table == table)
+ bitmap_set_bit(bitmap, field->field_index);
+ }
+ else
+ { // Item
+ sort_field->item->walk(&Item::register_field_in_read_map, 1,
+ (byte *) table);
+ }
+ }
+
+ if (param->addon_field)
+ {
+ SORT_ADDON_FIELD *addonf= param->addon_field;
+ Field *field;
+ for ( ; (field= addonf->field) ; addonf++)
+ bitmap_set_bit(bitmap, field->field_index);
+ }
+ else
+ {
+ /* Save filepos last */
+ table->prepare_for_position();
+ }
+}
+
+
static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
FILESORT_INFO *table_sort)
{
@@ -1353,7 +1422,8 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
uint length= 0;
uint fields= 0;
uint null_fields= 0;
- query_id_t query_id= thd->query_id;
+ MY_BITMAP *read_set= (*ptabfield)->table->read_set;
+
/*
If there is a reference to a field in the query add it
to the set of appended fields.
@@ -1365,17 +1435,9 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
*/
*plength= 0;
- /*
- The following statement is added to avoid sorting in alter_table.
- The fact is the filter 'field->query_id != thd->query_id'
- doesn't work for alter table
- */
- if (thd->lex->sql_command != SQLCOM_SELECT &&
- thd->lex->sql_command != SQLCOM_INSERT_SELECT)
- return 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
- if (field->query_id != query_id)
+ if (!bitmap_is_set(read_set, field->field_index))
continue;
if (field->flags & BLOB_FLAG)
return 0;
@@ -1398,7 +1460,7 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
null_fields= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
- if (field->query_id != thd->query_id)
+ if (!bitmap_is_set(read_set, field->field_index))
continue;
addonf->field= field;
addonf->offset= length;
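[The find_all_keys()/register_used_fields() changes above follow one pattern: save the table's column maps, point them at a scratch map covering only the columns the sort touches, scan, then restore. A standalone C sketch of that save/switch/restore flow, with hypothetical single-word bitmaps instead of MY_BITMAP:]

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t bitmap_t;             /* hypothetical one-word MY_BITMAP */

    struct table
    {
      bitmap_t *read_set, *write_set;      /* active column maps */
      bitmap_t tmp_set;                    /* scratch map owned by the table */
    };

    static void column_bitmaps_set(struct table *t, bitmap_t *r, bitmap_t *w)
    {
      t->read_set= r;
      t->write_set= w;
    }

    int main(void)
    {
      bitmap_t all= 0xFF, none= 0;
      struct table t= { &all, &none, 0 };
      bitmap_t *save_read= t.read_set, *save_write= t.write_set;

      t.tmp_set= 0;                        /* bitmap_clear_all(&tmp_set) */
      t.tmp_set|= 1U << 3;                 /* register_used_fields(): column 3 */
      column_bitmaps_set(&t, &t.tmp_set, &t.tmp_set);
      printf("scan sees: 0x%x\n", *t.read_set);       /* 0x8 */

      column_bitmaps_set(&t, save_read, save_write);  /* restore originals */
      printf("restored:  0x%x\n", *t.read_set);       /* 0xff */
      return 0;
    }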
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 473fb149871..1f9336d81f7 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -25,7 +25,8 @@
We will need an updated Berkeley DB version for this.
- Killing threads that has got a 'deadlock'
- SHOW TABLE STATUS should give more information about the table.
- - Get a more accurate count of the number of rows (estimate_rows_upper_bound()).
+ - Get a more accurate count of the number of rows
+ (estimate_rows_upper_bound()).
We could store the found number of rows when the table is scanned and
then increment the counter for each attempted write.
- We will need to extend the manager thread to makes checkpoints at
@@ -123,7 +124,8 @@ static int berkeley_rollback(THD *thd, bool all);
static int berkeley_rollback_to_savepoint(THD* thd, void *savepoint);
static int berkeley_savepoint(THD* thd, void *savepoint);
static int berkeley_release_savepoint(THD* thd, void *savepoint);
-static handler *berkeley_create_handler(TABLE_SHARE *table);
+static handler *berkeley_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
static const char berkeley_hton_name[]= "BerkeleyDB";
static const char berkeley_hton_comment[]=
@@ -167,9 +169,9 @@ handlerton berkeley_hton = {
NULL /* release_temporary_latches */
};
-handler *berkeley_create_handler(TABLE_SHARE *table)
+static handler *berkeley_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_berkeley(table);
+ return new (mem_root) ha_berkeley(table);
}
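
Every engine in this patch makes the same change to its create-handler hook: the handler object is placement-new'ed into a MEM_ROOT so its memory is released with the root instead of via delete. A minimal sketch, assuming the operator new(size_t, MEM_ROOT*) overload that sql/handler.h declares for this purpose; the function name is hypothetical:

    static handler *example_create_handler(TABLE_SHARE *table,
                                           MEM_ROOT *mem_root)
    {
      return new (mem_root) ha_berkeley(table);  /* any engine class */
    }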
typedef struct st_berkeley_trx_data {
@@ -479,7 +481,7 @@ void berkeley_cleanup_log_files(void)
ha_berkeley::ha_berkeley(TABLE_SHARE *table_arg)
:handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0),
int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
- HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
+ HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
HA_CAN_GEOMETRY |
HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
@@ -783,7 +785,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
transaction=0;
cursor=0;
key_read=0;
- block_size=8192; // Berkeley DB block size
+ stats.block_size=8192; // Berkeley DB block size
share->fixed_length_row= !(table_share->db_create_options &
HA_OPTION_PACK_RECORD);
@@ -799,7 +801,7 @@ int ha_berkeley::close(void)
my_free((char*) rec_buff,MYF(MY_ALLOW_ZERO_PTR));
my_free(alloc_ptr,MYF(MY_ALLOW_ZERO_PTR));
- ha_berkeley::extra(HA_EXTRA_RESET); // current_row buffer
+ ha_berkeley::reset(); // current_row buffer
DBUG_RETURN(free_share(share,table, hidden_primary_key,0));
}
@@ -900,11 +902,13 @@ void ha_berkeley::unpack_row(char *record, DBT *row)
else
{
/* Copy null bits */
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
const char *ptr= (const char*) row->data;
memcpy(record, ptr, table_share->null_bytes);
ptr+= table_share->null_bytes;
for (Field **field=table->field ; *field ; field++)
ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
}
}
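
The pair of calls added above follows one fixed shape throughout this patch; isolated, it is:

    /* Debug-build idiom (a no-op in release builds): temporarily widen
       the column map so Field accessors do not assert on columns
       outside the statement's current write set, then restore it. */
    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
    /* ... unpack or pack any column of the row here ... */
    dbug_tmp_restore_column_map(table->write_set, old_map);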
@@ -962,6 +966,7 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
KEY *key_info=table->key_info+keynr;
KEY_PART_INFO *key_part=key_info->key_part;
KEY_PART_INFO *end=key_part+key_info->key_parts;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
DBUG_ENTER("create_key");
key->data=buff;
@@ -985,6 +990,7 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
}
key->size= (buff - (char*) key->data);
DBUG_DUMP("key",(char*) key->data, key->size);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
DBUG_RETURN(key);
}
@@ -1002,6 +1008,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
KEY *key_info=table->key_info+keynr;
KEY_PART_INFO *key_part=key_info->key_part;
KEY_PART_INFO *end=key_part+key_info->key_parts;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
DBUG_ENTER("bdb:pack_key");
bzero((char*) key,sizeof(*key));
@@ -1029,6 +1036,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
}
key->size= (buff - (char*) key->data);
DBUG_DUMP("key",(char*) key->data, key->size);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
DBUG_RETURN(key);
}
@@ -1267,8 +1275,8 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
DB_TXN *sub_trans;
bool primary_key_changed;
DBUG_ENTER("update_row");
- LINT_INIT(error);
+ LINT_INIT(error);
statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
@@ -1849,8 +1857,9 @@ void ha_berkeley::info(uint flag)
DBUG_ENTER("ha_berkeley::info");
if (flag & HA_STATUS_VARIABLE)
{
- records = share->rows + changed_rows; // Just to get optimisations right
- deleted = 0;
+ // Just to get optimizations right
+ stats.records = share->rows + changed_rows;
+ stats.deleted = 0;
}
if ((flag & HA_STATUS_CONST) || version != share->version)
{
@@ -1871,19 +1880,8 @@ void ha_berkeley::info(uint flag)
int ha_berkeley::extra(enum ha_extra_function operation)
{
switch (operation) {
- case HA_EXTRA_RESET:
case HA_EXTRA_RESET_STATE:
- key_read=0;
- using_ignore=0;
- if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC))
- {
- current_row.flags=0;
- if (current_row.data)
- {
- free(current_row.data);
- current_row.data=0;
- }
- }
+ reset();
break;
case HA_EXTRA_KEYREAD:
key_read=1; // Query satisfied with key
@@ -1906,8 +1904,17 @@ int ha_berkeley::extra(enum ha_extra_function operation)
int ha_berkeley::reset(void)
{
- ha_berkeley::extra(HA_EXTRA_RESET);
- key_read=0; // Reset to state after open
+ key_read= 0;
+ using_ignore= 0;
+ if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC))
+ {
+ current_row.flags= 0;
+ if (current_row.data)
+ {
+ free(current_row.data);
+ current_row.data= 0;
+ }
+ }
return 0;
}
@@ -2196,7 +2203,7 @@ int ha_berkeley::rename_table(const char * from, const char * to)
double ha_berkeley::scan_time()
{
- return rows2double(records/3);
+ return rows2double(stats.records/3);
}
ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
@@ -2249,7 +2256,7 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
end_pos=end_range.less;
else
end_pos=end_range.less+end_range.equal;
- rows=(end_pos-start_pos)*records;
+ rows=(end_pos-start_pos)*stats.records;
DBUG_PRINT("exit",("rows: %g",rows));
DBUG_RETURN((ha_rows)(rows <= 1.0 ? 1 : rows));
}
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 21b618b8d6d..c7c2f135bbd 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -90,7 +90,7 @@ class ha_berkeley: public handler
ulong index_flags(uint idx, uint part, bool all_parts) const;
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
- ulong table_flags(void) const { return int_table_flags; }
+ ulonglong table_flags(void) const { return int_table_flags; }
uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_rows_upper_bound();
@@ -98,7 +98,6 @@ class ha_berkeley: public handler
uint max_supported_key_part_length() const { return UINT_MAX32; }
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
- bool has_transactions() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
int close(void);
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index f4fc5f47193..1471b25f7b6 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -364,7 +364,8 @@ pthread_mutex_t federated_mutex; // To init the hash
static int federated_init= FALSE; // Checking the state of hash
/* Static declaration for handerton */
-static handler *federated_create_handler(TABLE_SHARE *table);
+static handler *federated_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
static int federated_commit(THD *thd, bool all);
static int federated_rollback(THD *thd, bool all);
@@ -412,9 +413,10 @@ handlerton federated_hton= {
};
-static handler *federated_create_handler(TABLE_SHARE *table)
+static handler *federated_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
{
- return new ha_federated(table);
+ return new (mem_root) ha_federated(table);
}
@@ -796,6 +798,7 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
{
ulong *lengths;
Field **field;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
lengths= mysql_fetch_lengths(stored_result);
@@ -814,12 +817,15 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
(*field)->set_null();
else
{
- (*field)->set_notnull();
- (*field)->store(*row, *lengths, &my_charset_bin);
+ if (bitmap_is_set(table->read_set, (*field)->field_index))
+ {
+ (*field)->set_notnull();
+ (*field)->store(*row, *lengths, &my_charset_bin);
+ }
}
(*field)->move_field_offset(-old_ptr);
}
-
+ dbug_tmp_restore_column_map(table->write_set, old_map);
DBUG_RETURN(0);
}
@@ -1138,22 +1144,25 @@ bool ha_federated::create_where_from_key(String *to,
KEY *key_info,
const key_range *start_key,
const key_range *end_key,
- bool records_in_range)
+ bool records_in_range,
+ bool eq_range)
{
- bool both_not_null=
+ bool both_not_null=
(start_key != NULL && end_key != NULL) ? TRUE : FALSE;
const byte *ptr;
uint remainder, length;
char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
const key_range *ranges[2]= { start_key, end_key };
+ my_bitmap_map *old_map;
DBUG_ENTER("ha_federated::create_where_from_key");
tmp.length(0);
if (start_key == NULL && end_key == NULL)
DBUG_RETURN(1);
- for (int i= 0; i <= 1; i++)
+ old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ for (uint i= 0; i <= 1; i++)
{
bool needs_quotes;
KEY_PART_INFO *key_part;
@@ -1187,16 +1196,16 @@ bool ha_federated::create_where_from_key(String *to,
{
if (emit_key_part_name(&tmp, key_part) ||
tmp.append(FEDERATED_ISNULL))
- DBUG_RETURN(1);
+ goto err;
continue;
}
}
if (tmp.append(FEDERATED_OPENPAREN))
- DBUG_RETURN(1);
+ goto err;
- switch(ranges[i]->flag) {
- case(HA_READ_KEY_EXACT):
+ switch (ranges[i]->flag) {
+ case HA_READ_KEY_EXACT:
DBUG_PRINT("info", ("federated HA_READ_KEY_EXACT %d", i));
if (store_length >= length ||
!needs_quotes ||
@@ -1204,22 +1213,22 @@ bool ha_federated::create_where_from_key(String *to,
field->result_type() != STRING_RESULT)
{
if (emit_key_part_name(&tmp, key_part))
- DBUG_RETURN(1);
+ goto err;
if (records_in_range)
{
if (tmp.append(FEDERATED_GE))
- DBUG_RETURN(1);
+ goto err;
}
else
{
if (tmp.append(FEDERATED_EQ))
- DBUG_RETURN(1);
+ goto err;
}
if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
part_length))
- DBUG_RETURN(1);
+ goto err;
}
else
{
@@ -1228,43 +1237,49 @@ bool ha_federated::create_where_from_key(String *to,
tmp.append(FEDERATED_LIKE) ||
emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
part_length))
- DBUG_RETURN(1);
+ goto err;
}
break;
- case(HA_READ_AFTER_KEY):
+ case HA_READ_AFTER_KEY:
+ if (eq_range)
+ {
+ if (tmp.append("1=1")) // Dummy
+ goto err;
+ break;
+ }
DBUG_PRINT("info", ("federated HA_READ_AFTER_KEY %d", i));
if (store_length >= length) /* end key */
{
if (emit_key_part_name(&tmp, key_part))
- DBUG_RETURN(1);
+ goto err;
if (i > 0) /* end key */
{
if (tmp.append(FEDERATED_LE))
- DBUG_RETURN(1);
+ goto err;
}
else /* start key */
{
if (tmp.append(FEDERATED_GT))
- DBUG_RETURN(1);
+ goto err;
}
if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
part_length))
{
- DBUG_RETURN(1);
+ goto err;
}
break;
}
- case(HA_READ_KEY_OR_NEXT):
+ case HA_READ_KEY_OR_NEXT:
DBUG_PRINT("info", ("federated HA_READ_KEY_OR_NEXT %d", i));
if (emit_key_part_name(&tmp, key_part) ||
tmp.append(FEDERATED_GE) ||
emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
part_length))
- DBUG_RETURN(1);
+ goto err;
break;
- case(HA_READ_BEFORE_KEY):
+ case HA_READ_BEFORE_KEY:
DBUG_PRINT("info", ("federated HA_READ_BEFORE_KEY %d", i));
if (store_length >= length)
{
@@ -1272,23 +1287,23 @@ bool ha_federated::create_where_from_key(String *to,
tmp.append(FEDERATED_LT) ||
emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
part_length))
- DBUG_RETURN(1);
+ goto err;
break;
}
- case(HA_READ_KEY_OR_PREV):
+ case HA_READ_KEY_OR_PREV:
DBUG_PRINT("info", ("federated HA_READ_KEY_OR_PREV %d", i));
if (emit_key_part_name(&tmp, key_part) ||
tmp.append(FEDERATED_LE) ||
emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
part_length))
- DBUG_RETURN(1);
+ goto err;
break;
default:
DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
- DBUG_RETURN(1);
+ goto err;
}
if (tmp.append(FEDERATED_CLOSEPAREN))
- DBUG_RETURN(1);
+ goto err;
next_loop:
if (store_length >= length)
@@ -1298,13 +1313,15 @@ next_loop:
length-= store_length;
ptr+= store_length;
if (tmp.append(FEDERATED_AND))
- DBUG_RETURN(1);
+ goto err;
DBUG_PRINT("info",
("create_where_from_key WHERE clause: %s",
tmp.c_ptr_quick()));
}
}
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+
if (both_not_null)
if (tmp.append(FEDERATED_CLOSEPAREN))
DBUG_RETURN(1);
@@ -1316,6 +1333,10 @@ next_loop:
DBUG_RETURN(1);
DBUG_RETURN(0);
+
+err:
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ DBUG_RETURN(1);
}
/*
@@ -1355,7 +1376,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.append(FEDERATED_BTICK);
query.append(FEDERATED_COMMA);
}
- query.length(query.length()- (FEDERATED_COMMA_LEN - 1));
+ query.length(query.length()- FEDERATED_COMMA_LEN);
query.append(FEDERATED_FROM);
query.append(FEDERATED_BTICK);
@@ -1606,15 +1627,16 @@ int ha_federated::write_row(byte *buf)
String insert_field_value_string(insert_field_value_buffer,
sizeof(insert_field_value_buffer),
&my_charset_bin);
- values_string.length(0);
- insert_string.length(0);
- insert_field_value_string.length(0);
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("ha_federated::write_row");
DBUG_PRINT("info",
("table charset name %s csname %s",
table->s->table_charset->name,
table->s->table_charset->csname));
+ values_string.length(0);
+ insert_string.length(0);
+ insert_field_value_string.length(0);
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
@@ -1641,7 +1663,7 @@ int ha_federated::write_row(byte *buf)
for (field= table->field; *field; field++)
{
/* if the field is marked in the write set for this statement */
- if (ha_get_bit_in_write_set((*field)->fieldnr))
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
{
/*
There are some fields. This will be used later to determine
@@ -1666,22 +1688,17 @@ int ha_federated::write_row(byte *buf)
/* append commas between both fields and fieldnames */
/*
- unfortunately, we can't use the logic
- if *(fields + 1) to make the following
- appends conditional because we may not append
- if the next field doesn't match the condition:
- (((*field)->query_id && (*field)->query_id == current_query_id)
+ unfortunately, we can't use the logic if *(fields + 1) to
+ make the following appends conditional as we don't know if the
+ next field is in the write set
*/
insert_string.append(FEDERATED_COMMA);
values_string.append(FEDERATED_COMMA);
}
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
/*
- remove trailing comma
- */
- insert_string.length(insert_string.length() - strlen(FEDERATED_COMMA));
- /*
if there were no fields, we don't want to add a closing paren
AND, we don't want to chop off the last char '('
insert will be "INSERT INTO t1 VALUES ();"
@@ -1689,9 +1706,13 @@ int ha_federated::write_row(byte *buf)
if (has_fields)
{
/* chops off leading commas */
- values_string.length(values_string.length() - strlen(FEDERATED_COMMA));
+ insert_string.length(insert_string.length() - FEDERATED_COMMA_LEN);
+ values_string.length(values_string.length() - FEDERATED_COMMA_LEN);
insert_string.append(FEDERATED_CLOSEPAREN);
}
+ else
+ insert_string.length(insert_string.length() - FEDERATED_CLOSEPAREN_LEN);
+
/* we always want to append this, even if there aren't any fields */
values_string.append(FEDERATED_CLOSEPAREN);
@@ -1705,8 +1726,8 @@ int ha_federated::write_row(byte *buf)
DBUG_RETURN(stash_remote_error());
}
/*
- If the table we've just written a record to contains an auto_increment field,
- then store the last_insert_id() value from the foreign server
+ If the table we've just written a record to contains an auto_increment
+ field, then store the last_insert_id() value from the foreign server
*/
if (table->next_number_field)
update_auto_increment();
@@ -1728,7 +1749,7 @@ void ha_federated::update_auto_increment(void)
DBUG_ENTER("ha_federated::update_auto_increment");
thd->insert_id(mysql->last_used_con->insert_id);
- DBUG_PRINT("info",("last_insert_id %d", auto_increment_value));
+ DBUG_PRINT("info",("last_insert_id %d", stats.auto_increment_value));
DBUG_VOID_RETURN;
}
@@ -1816,7 +1837,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
this? Because we are only updating one record, and LIMIT enforces
this.
*/
- bool has_a_primary_key= (table->s->primary_key == 0 ? TRUE : FALSE);
+ bool has_a_primary_key= test(table->s->primary_key != MAX_KEY);
/*
buffers for following strings
*/
@@ -1868,48 +1889,52 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
for (Field **field= table->field; *field; field++)
{
- where_string.append((*field)->field_name);
- update_string.append((*field)->field_name);
- update_string.append(FEDERATED_EQ);
-
- if ((*field)->is_null())
- new_field_value.append(FEDERATED_NULL);
- else
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
{
- /* otherwise = */
- (*field)->val_str(&new_field_value);
- (*field)->quote_data(&new_field_value);
-
- if (!field_in_record_is_null(table, *field, (char*) old_data))
- where_string.append(FEDERATED_EQ);
- }
-
- if (field_in_record_is_null(table, *field, (char*) old_data))
- where_string.append(FEDERATED_ISNULL);
- else
- {
- (*field)->val_str(&old_field_value,
- (char*) (old_data + (*field)->offset()));
- (*field)->quote_data(&old_field_value);
- where_string.append(old_field_value);
+ if ((*field)->is_null())
+ new_field_value.append(FEDERATED_NULL);
+ else
+ {
+ my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ /* otherwise = */
+ (*field)->val_str(&new_field_value);
+ (*field)->quote_data(&new_field_value);
+ tmp_restore_column_map(table->read_set, old_map);
+ }
+ update_string.append((*field)->field_name);
+ update_string.append(FEDERATED_EQ);
+ update_string.append(new_field_value);
+ update_string.append(FEDERATED_COMMA);
+ new_field_value.length(0);
}
- update_string.append(new_field_value);
- new_field_value.length(0);
-
- /*
- Only append conjunctions if we have another field in which
- to iterate
- */
- if (*(field + 1))
+ if (bitmap_is_set(table->read_set, (*field)->field_index))
{
- update_string.append(FEDERATED_COMMA);
+ where_string.append((*field)->field_name);
+ if (field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append(FEDERATED_ISNULL);
+ else
+ {
+ where_string.append(FEDERATED_EQ);
+ (*field)->val_str(&old_field_value,
+ (char*) (old_data + (*field)->offset()));
+ (*field)->quote_data(&old_field_value);
+ where_string.append(old_field_value);
+ old_field_value.length(0);
+ }
where_string.append(FEDERATED_AND);
}
- old_field_value.length(0);
}
- update_string.append(FEDERATED_WHERE);
- update_string.append(where_string);
+
+ /* Remove last ', '. This works as there must be at least one updated field */
+ update_string.length(update_string.length() - FEDERATED_COMMA_LEN);
+ if (where_string.length())
+ {
+ where_string.length(where_string.length() - FEDERATED_AND_LEN);
+ update_string.append(FEDERATED_WHERE);
+ update_string.append(where_string);
+ }
+
/*
If this table does not have a primary key, then we could possibly
update multiple rows. We want to make sure to only update one!
@@ -1943,9 +1968,9 @@ int ha_federated::delete_row(const byte *buf)
{
char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char data_buffer[FEDERATED_QUERY_BUFFER_SIZE];
-
String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+ uint found= 0;
DBUG_ENTER("ha_federated::delete_row");
delete_string.length(0);
@@ -1959,25 +1984,31 @@ int ha_federated::delete_row(const byte *buf)
for (Field **field= table->field; *field; field++)
{
Field *cur_field= *field;
- data_string.length(0);
- delete_string.append(cur_field->field_name);
-
- if (cur_field->is_null())
- {
- delete_string.append(FEDERATED_IS);
- data_string.append(FEDERATED_NULL);
- }
- else
+ found++;
+ if (bitmap_is_set(table->read_set, cur_field->field_index))
{
- delete_string.append(FEDERATED_EQ);
- cur_field->val_str(&data_string);
- cur_field->quote_data(&data_string);
+ data_string.length(0);
+ delete_string.append(cur_field->field_name);
+ if (cur_field->is_null())
+ {
+ delete_string.append(FEDERATED_IS);
+ delete_string.append(FEDERATED_NULL);
+ }
+ else
+ {
+ delete_string.append(FEDERATED_EQ);
+ cur_field->val_str(&data_string);
+ cur_field->quote_data(&data_string);
+ delete_string.append(data_string);
+ }
+ delete_string.append(FEDERATED_AND);
}
-
- delete_string.append(data_string);
- delete_string.append(FEDERATED_AND);
}
- delete_string.length(delete_string.length()-5); // Remove trailing AND
+
+ // Remove trailing AND
+ delete_string.length(delete_string.length() - FEDERATED_AND_LEN);
+ if (!found)
+ delete_string.length(delete_string.length() - FEDERATED_WHERE_LEN);
delete_string.append(FEDERATED_LIMIT1);
DBUG_PRINT("info",
@@ -1986,10 +2017,10 @@ int ha_federated::delete_row(const byte *buf)
{
DBUG_RETURN(stash_remote_error());
}
- deleted+= mysql->affected_rows;
+ stats.deleted+= mysql->affected_rows;
DBUG_PRINT("info",
("rows deleted %d rows deleted for all time %d",
- int(mysql->affected_rows), deleted));
+ int(mysql->affected_rows), stats.deleted));
DBUG_RETURN(0);
}
@@ -2050,7 +2081,7 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
create_where_from_key(&index_string,
&table->key_info[index],
&range,
- NULL, 0);
+ NULL, 0, 0);
sql_query.append(index_string);
DBUG_PRINT("info",
@@ -2112,15 +2143,10 @@ int ha_federated::index_init(uint keynr, bool sorted)
DBUG_RETURN(0);
}
-/*
- int read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted);
-*/
int ha_federated::read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted)
+ const key_range *end_key,
+ bool eq_range, bool sorted)
{
char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
int retval;
@@ -2136,7 +2162,7 @@ int ha_federated::read_range_first(const key_range *start_key,
sql_query.append(share->select_query);
create_where_from_key(&sql_query,
&table->key_info[active_index],
- start_key, end_key, 0);
+ start_key, end_key, 0, eq_range);
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
@@ -2481,18 +2507,20 @@ void ha_federated::info(uint flag)
delete_length = ?
*/
if (row[4] != NULL)
- records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
+ stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0,
+ &error);
if (row[5] != NULL)
- mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+ stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0,
+ &error);
if (row[12] != NULL)
- update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+ stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0,
+ &error);
if (row[13] != NULL)
- check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
+ stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0,
+ &error);
}
if (flag & HA_STATUS_CONST)
- {
- block_size= 4096;
- }
+ stats.block_size= 4096;
}
if (result)
@@ -2543,8 +2571,8 @@ int ha_federated::delete_all_rows()
{
DBUG_RETURN(stash_remote_error());
}
- deleted+= records;
- records= 0;
+ stats.deleted+= stats.records;
+ stats.records= 0;
DBUG_RETURN(0);
}
@@ -2822,6 +2850,7 @@ mysql_declare_plugin(federated)
NULL, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
+ 0
}
mysql_declare_plugin_end;
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index 458ef42ebda..1d8bda43e24 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -39,83 +39,83 @@
#define FEDERATED_RECORDS_IN_RANGE 2
#define FEDERATED_INFO " SHOW TABLE STATUS LIKE "
-#define FEDERATED_INFO_LEN sizeof(FEDERATED_INFO)
+#define FEDERATED_INFO_LEN (sizeof(FEDERATED_INFO) -1)
#define FEDERATED_SELECT "SELECT "
-#define FEDERATED_SELECT_LEN sizeof(FEDERATED_SELECT)
+#define FEDERATED_SELECT_LEN (sizeof(FEDERATED_SELECT) -1)
#define FEDERATED_WHERE " WHERE "
-#define FEDERATED_WHERE_LEN sizeof(FEDERATED_WHERE)
+#define FEDERATED_WHERE_LEN (sizeof(FEDERATED_WHERE) -1)
#define FEDERATED_FROM " FROM "
-#define FEDERATED_FROM_LEN sizeof(FEDERATED_FROM)
+#define FEDERATED_FROM_LEN (sizeof(FEDERATED_FROM) -1)
#define FEDERATED_PERCENT "%"
-#define FEDERATED_PERCENT_LEN sizeof(FEDERATED_PERCENT)
+#define FEDERATED_PERCENT_LEN (sizeof(FEDERATED_PERCENT) -1)
#define FEDERATED_IS " IS "
-#define FEDERATED_IS_LEN sizeof(FEDERATED_IS)
+#define FEDERATED_IS_LEN (sizeof(FEDERATED_IS) -1)
#define FEDERATED_NULL " NULL "
-#define FEDERATED_NULL_LEN sizeof(FEDERATED_NULL)
+#define FEDERATED_NULL_LEN (sizeof(FEDERATED_NULL) -1)
#define FEDERATED_ISNULL " IS NULL "
-#define FEDERATED_ISNULL_LEN sizeof(FEDERATED_ISNULL)
+#define FEDERATED_ISNULL_LEN (sizeof(FEDERATED_ISNULL) -1)
#define FEDERATED_LIKE " LIKE "
-#define FEDERATED_LIKE_LEN sizeof(FEDERATED_LIKE)
+#define FEDERATED_LIKE_LEN (sizeof(FEDERATED_LIKE) -1)
#define FEDERATED_TRUNCATE "TRUNCATE "
-#define FEDERATED_TRUNCATE_LEN sizeof(FEDERATED_TRUNCATE)
+#define FEDERATED_TRUNCATE_LEN (sizeof(FEDERATED_TRUNCATE) -1)
#define FEDERATED_DELETE "DELETE "
-#define FEDERATED_DELETE_LEN sizeof(FEDERATED_DELETE)
+#define FEDERATED_DELETE_LEN (sizeof(FEDERATED_DELETE) -1)
#define FEDERATED_INSERT "INSERT INTO "
-#define FEDERATED_INSERT_LEN sizeof(FEDERATED_INSERT)
+#define FEDERATED_INSERT_LEN (sizeof(FEDERATED_INSERT) -1)
#define FEDERATED_OPTIMIZE "OPTIMIZE TABLE "
-#define FEDERATED_OPTIMIZE_LEN sizeof(FEDERATED_OPTIMIZE)
+#define FEDERATED_OPTIMIZE_LEN (sizeof(FEDERATED_OPTIMIZE) -1)
#define FEDERATED_REPAIR "REPAIR TABLE "
-#define FEDERATED_REPAIR_LEN sizeof(FEDERATED_REPAIR)
+#define FEDERATED_REPAIR_LEN (sizeof(FEDERATED_REPAIR) -1)
#define FEDERATED_QUICK " QUICK"
-#define FEDERATED_QUICK_LEN sizeof(FEDERATED_QUICK)
+#define FEDERATED_QUICK_LEN (sizeof(FEDERATED_QUICK) -1)
#define FEDERATED_EXTENDED " EXTENDED"
-#define FEDERATED_EXTENDED_LEN sizeof(FEDERATED_EXTENDED)
+#define FEDERATED_EXTENDED_LEN (sizeof(FEDERATED_EXTENDED) -1)
#define FEDERATED_USE_FRM " USE_FRM"
-#define FEDERATED_USE_FRM_LEN sizeof(FEDERATED_USE_FRM)
+#define FEDERATED_USE_FRM_LEN (sizeof(FEDERATED_USE_FRM) -1)
#define FEDERATED_LIMIT1 " LIMIT 1"
-#define FEDERATED_LIMIT1_LEN sizeof(FEDERATED_LIMIT1)
+#define FEDERATED_LIMIT1_LEN (sizeof(FEDERATED_LIMIT1) -1)
#define FEDERATED_VALUES "VALUES "
-#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES)
+#define FEDERATED_VALUES_LEN (sizeof(FEDERATED_VALUES) -1)
#define FEDERATED_UPDATE "UPDATE "
-#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE)
-#define FEDERATED_SET "SET "
-#define FEDERATED_SET_LEN sizeof(FEDERATED_SET)
+#define FEDERATED_UPDATE_LEN (sizeof(FEDERATED_UPDATE) -1)
+#define FEDERATED_SET " SET "
+#define FEDERATED_SET_LEN (sizeof(FEDERATED_SET) -1)
#define FEDERATED_AND " AND "
-#define FEDERATED_AND_LEN sizeof(FEDERATED_AND)
+#define FEDERATED_AND_LEN (sizeof(FEDERATED_AND) -1)
#define FEDERATED_CONJUNCTION ") AND ("
-#define FEDERATED_CONJUNCTION_LEN sizeof(FEDERATED_CONJUNCTION)
+#define FEDERATED_CONJUNCTION_LEN (sizeof(FEDERATED_CONJUNCTION) -1)
#define FEDERATED_OR " OR "
-#define FEDERATED_OR_LEN sizeof(FEDERATED_OR)
+#define FEDERATED_OR_LEN (sizeof(FEDERATED_OR) -1)
#define FEDERATED_NOT " NOT "
-#define FEDERATED_NOT_LEN sizeof(FEDERATED_NOT)
+#define FEDERATED_NOT_LEN (sizeof(FEDERATED_NOT) -1)
#define FEDERATED_STAR "* "
-#define FEDERATED_STAR_LEN sizeof(FEDERATED_STAR)
+#define FEDERATED_STAR_LEN (sizeof(FEDERATED_STAR) -1)
#define FEDERATED_SPACE " "
-#define FEDERATED_SPACE_LEN sizeof(FEDERATED_SPACE)
+#define FEDERATED_SPACE_LEN (sizeof(FEDERATED_SPACE) -1)
#define FEDERATED_SQUOTE "'"
-#define FEDERATED_SQUOTE_LEN sizeof(FEDERATED_SQUOTE)
+#define FEDERATED_SQUOTE_LEN (sizeof(FEDERATED_SQUOTE) -1)
#define FEDERATED_COMMA ", "
-#define FEDERATED_COMMA_LEN sizeof(FEDERATED_COMMA)
+#define FEDERATED_COMMA_LEN (sizeof(FEDERATED_COMMA) -1)
#define FEDERATED_BTICK "`"
-#define FEDERATED_BTICK_LEN sizeof(FEDERATED_BTICK)
+#define FEDERATED_BTICK_LEN (sizeof(FEDERATED_BTICK) -1)
#define FEDERATED_OPENPAREN " ("
-#define FEDERATED_OPENPAREN_LEN sizeof(FEDERATED_OPENPAREN)
+#define FEDERATED_OPENPAREN_LEN (sizeof(FEDERATED_OPENPAREN) -1)
#define FEDERATED_CLOSEPAREN ") "
-#define FEDERATED_CLOSEPAREN_LEN sizeof(FEDERATED_CLOSEPAREN)
+#define FEDERATED_CLOSEPAREN_LEN (sizeof(FEDERATED_CLOSEPAREN) -1)
#define FEDERATED_NE " != "
-#define FEDERATED_NE_LEN sizeof(FEDERATED_NE)
+#define FEDERATED_NE_LEN (sizeof(FEDERATED_NE) -1)
#define FEDERATED_GT " > "
-#define FEDERATED_GT_LEN sizeof(FEDERATED_GT)
+#define FEDERATED_GT_LEN (sizeof(FEDERATED_GT) -1)
#define FEDERATED_LT " < "
-#define FEDERATED_LT_LEN sizeof(FEDERATED_LT)
+#define FEDERATED_LT_LEN (sizeof(FEDERATED_LT) -1)
#define FEDERATED_LE " <= "
-#define FEDERATED_LE_LEN sizeof(FEDERATED_LE)
+#define FEDERATED_LE_LEN (sizeof(FEDERATED_LE) -1)
#define FEDERATED_GE " >= "
-#define FEDERATED_GE_LEN sizeof(FEDERATED_GE)
+#define FEDERATED_GE_LEN (sizeof(FEDERATED_GE) -1)
#define FEDERATED_EQ " = "
-#define FEDERATED_EQ_LEN sizeof(FEDERATED_EQ)
+#define FEDERATED_EQ_LEN (sizeof(FEDERATED_EQ) -1)
#define FEDERATED_FALSE " 1=0"
-#define FEDERATED_FALSE_LEN sizeof(FEDERATED_FALSE)
+#define FEDERATED_FALSE_LEN (sizeof(FEDERATED_FALSE) -1)
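
The "-1" added to every _LEN macro drops the string literal's trailing NUL, which sizeof() counts; the old values over-counted by one, which is why the length(...) chops elsewhere in this patch can now use the macros directly. A standalone sketch of the arithmetic:

    #include <stdio.h>

    #define COMMA     ", "
    #define COMMA_LEN (sizeof(COMMA) - 1)      /* == 2, not 3 */

    int main(void)
    {
      char buf[]= "a, b, ";                    /* trailing separator */
      buf[sizeof(buf) - 1 - COMMA_LEN]= '\0';  /* chop exactly ", " */
      printf("%s\n", buf);                     /* prints: a, b */
      return 0;
    }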
/*
FEDERATED_SHARE is a structure that will be shared among all open handlers
@@ -168,7 +168,7 @@ private:
bool create_where_from_key(String *to, KEY *key_info,
const key_range *start_key,
const key_range *end_key,
- bool records_in_range);
+ bool records_in_range, bool eq_range);
int stash_remote_error();
public:
@@ -192,12 +192,12 @@ public:
implements. The current table flags are documented in
handler.h
*/
- ulong table_flags() const
+ ulonglong table_flags() const
{
/* fix server to be able to get remote server table flags */
- return (HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
- HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS);
+ return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
+ HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS |
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_PARTIAL_COLUMN_READ);
}
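
table_flags() widens from ulong to ulonglong across every engine in this patch because the HA_* capability bit set has outgrown 32 bits; with a 32-bit ulong, any flag past bit 31 would be silently truncated. A sketch, where the bit position and flag name are hypothetical:

    /* ULL() is the my_global.h unsigned-64-bit-literal macro. */
    #define HA_HYPOTHETICAL_FLAG (ULL(1) << 36)  /* needs 64-bit storage */
    ulonglong flags= HA_HYPOTHETICAL_FLAG | HA_NULL_IN_KEY;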
/*
This is a bitmap of flags that says how the storage engine
@@ -231,8 +231,8 @@ public:
*/
double scan_time()
{
- DBUG_PRINT("info", ("records %lu", (ulong) records));
- return (double)(records*1000);
+ DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
+ return (double)(stats.records*1000);
}
/*
The next method will never be called if you do not implement indexes.
@@ -302,7 +302,6 @@ public:
int external_lock(THD *thd, int lock_type);
int connection_commit();
int connection_rollback();
- bool has_transactions() { return 1; }
int connection_autocommit(bool state);
int execute_simple_query(const char *query, int len);
};
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 1a7efb42748..7b2d83cbc8d 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -24,7 +24,7 @@
#include "ha_heap.h"
-static handler *heap_create_handler(TABLE_SHARE *table);
+static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
static const char heap_hton_name[]= "MEMORY";
static const char heap_hton_comment[]=
@@ -68,9 +68,9 @@ handlerton heap_hton= {
NULL /* release_temporary_latches */
};
-static handler *heap_create_handler(TABLE_SHARE *table)
+static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_heap(table);
+ return new (mem_root) ha_heap(table);
}
@@ -361,16 +361,16 @@ void ha_heap::info(uint flag)
HEAPINFO info;
(void) heap_info(file,&info,flag);
- records = info.records;
- deleted = info.deleted;
- errkey = info.errkey;
- mean_rec_length=info.reclength;
- data_file_length=info.data_length;
- index_file_length=info.index_length;
- max_data_file_length= info.max_records* info.reclength;
- delete_length= info.deleted * info.reclength;
+ errkey= info.errkey;
+ stats.records = info.records;
+ stats.deleted = info.deleted;
+ stats.mean_rec_length=info.reclength;
+ stats.data_file_length=info.data_length;
+ stats.index_file_length=info.index_length;
+ stats.max_data_file_length= info.max_records* info.reclength;
+ stats.delete_length= info.deleted * info.reclength;
if (flag & HA_STATUS_AUTO)
- auto_increment_value= info.auto_increment;
+ stats.auto_increment_value= info.auto_increment;
/*
If info() is called for the first time after open(), we will still
have to update the key statistics. Hoping that a table lock is now
@@ -380,11 +380,19 @@ void ha_heap::info(uint flag)
update_key_stats();
}
+
int ha_heap::extra(enum ha_extra_function operation)
{
return heap_extra(file,operation);
}
+
+int ha_heap::reset()
+{
+ return heap_reset(file);
+}
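
The reset() bodies added throughout this patch all follow the same contract: handler::reset() is the per-statement cleanup hook that replaces extra(HA_EXTRA_RESET). A sketch, where ha_example and its cleanup call are hypothetical:

    int ha_example::reset(void)
    {
      free_statement_buffers();   /* engine-specific per-statement cleanup */
      return 0;                   /* 0 on success, error code otherwise */
    }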
+
+
int ha_heap::delete_all_rows()
{
heap_clear(file);
@@ -561,8 +569,8 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
max_key->flag != HA_READ_AFTER_KEY)
return HA_POS_ERROR; // Can only use exact keys
- if (records <= 1)
- return records;
+ if (stats.records <= 1)
+ return stats.records;
/* Assert that info() did run. We need current statistics here. */
DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
@@ -690,13 +698,13 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
{
table->file->info(HA_STATUS_AUTO);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
- create_info->auto_increment_value= auto_increment_value;
+ create_info->auto_increment_value= stats.auto_increment_value;
}
ulonglong ha_heap::get_auto_increment()
{
ha_heap::info(HA_STATUS_AUTO);
- return auto_increment_value;
+ return stats.auto_increment_value;
}
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 9b9b7f90d90..5933f6a0ff1 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -46,11 +46,11 @@ public:
/* Rows also use a fixed-size format */
enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
const char **bas_ext() const;
- ulong table_flags() const
+ ulonglong table_flags() const
{
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
- HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED);
+ HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
@@ -61,7 +61,8 @@ public:
const key_map *keys_to_use_for_scanning() { return &btree_keys; }
uint max_supported_keys() const { return MAX_KEY; }
uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
- double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ double scan_time()
+ { return (double) (stats.records+stats.deleted) / 20.0+10; }
double read_time(uint index, uint ranges, ha_rows rows)
{ return (double) rows / 20.0+1; }
@@ -87,6 +88,7 @@ public:
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);
+ int reset();
int external_lock(THD *thd, int lock_type);
int delete_all_rows(void);
int disable_indexes(uint mode);
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 0b2f561e8c9..8811cbdce33 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -203,7 +203,8 @@ static int innobase_rollback(THD* thd, bool all);
static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
static int innobase_savepoint(THD* thd, void *savepoint);
static int innobase_release_savepoint(THD* thd, void *savepoint);
-static handler *innobase_create_handler(TABLE_SHARE *table);
+static handler *innobase_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
static const char innobase_hton_name[]= "InnoDB";
static const char innobase_hton_comment[]=
@@ -248,9 +249,9 @@ handlerton innobase_hton = {
};
-static handler *innobase_create_handler(TABLE_SHARE *table)
+static handler *innobase_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_innobase(table);
+ return new (mem_root) ha_innobase(table);
}
@@ -843,10 +844,9 @@ ha_innobase::ha_innobase(TABLE_SHARE *table_arg)
HA_NULL_IN_KEY |
HA_CAN_INDEX_BLOBS |
HA_CAN_SQL_HANDLER |
- HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS |
+ HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
HA_PRIMARY_KEY_IN_READ_INDEX |
- HA_CAN_GEOMETRY |
+ HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ |
HA_TABLE_SCAN_ON_INDEX),
start_of_scan(0),
num_write_row(0)
@@ -2322,7 +2322,7 @@ ha_innobase::open(
}
}
- block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL
+ stats.block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL
in query optimization */
/* Init table lock structure */
@@ -2917,16 +2917,15 @@ ha_innobase::store_key_val_for_row(
/******************************************************************
Builds a 'template' to the prebuilt struct. The template is used in fast
retrieval of just those column values MySQL needs in its processing. */
-static
void
-build_template(
+ha_innobase::build_template(
/*===========*/
row_prebuilt_t* prebuilt, /* in: prebuilt struct */
THD* thd, /* in: current user thread, used
only if templ_type is
ROW_MYSQL_REC_FIELDS */
TABLE* table, /* in: MySQL table */
- ulint templ_type) /* in: ROW_MYSQL_WHOLE_ROW or
+ uint templ_type) /* in: ROW_MYSQL_WHOLE_ROW or
ROW_MYSQL_REC_FIELDS */
{
dict_index_t* index;
@@ -3035,8 +3034,8 @@ build_template(
goto include_field;
}
- if (table->file->ha_get_bit_in_read_set(i+1) ||
- table->file->ha_get_bit_in_write_set(i+1)) {
+ if (bitmap_is_set(table->read_set, i) ||
+ bitmap_is_set(table->write_set, i)) {
/* This field is needed in the query */
goto include_field;
@@ -5420,7 +5419,7 @@ ha_innobase::info(
nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
if (os_file_get_status(path,&stat_info)) {
- create_time = stat_info.ctime;
+ stats.create_time = stat_info.ctime;
}
}
@@ -5448,21 +5447,21 @@ ha_innobase::info(
n_rows++;
}
- records = (ha_rows)n_rows;
- deleted = 0;
- data_file_length = ((ulonglong)
+ stats.records = (ha_rows)n_rows;
+ stats.deleted = 0;
+ stats.data_file_length = ((ulonglong)
ib_table->stat_clustered_index_size)
* UNIV_PAGE_SIZE;
- index_file_length = ((ulonglong)
+ stats.index_file_length = ((ulonglong)
ib_table->stat_sum_of_other_index_sizes)
* UNIV_PAGE_SIZE;
- delete_length = 0;
- check_time = 0;
+ stats.delete_length = 0;
+ stats.check_time = 0;
- if (records == 0) {
- mean_rec_length = 0;
+ if (stats.records == 0) {
+ stats.mean_rec_length = 0;
} else {
- mean_rec_length = (ulong) (data_file_length / records);
+ stats.mean_rec_length = (ulong) (stats.data_file_length / stats.records);
}
}
@@ -5511,9 +5510,9 @@ ha_innobase::info(
if (index->stat_n_diff_key_vals[j + 1] == 0) {
- rec_per_key = records;
+ rec_per_key = stats.records;
} else {
- rec_per_key = (ha_rows)(records /
+ rec_per_key = (ha_rows)(stats.records /
index->stat_n_diff_key_vals[j + 1]);
}
@@ -5568,7 +5567,7 @@ ha_innobase::info(
}
}
- auto_increment_value = auto_inc;
+ stats.auto_increment_value = auto_inc;
}
prebuilt->trx->op_info = (char*)"";
@@ -5963,8 +5962,7 @@ ha_innobase::extra(
/*===============*/
/* out: 0 or error number */
enum ha_extra_function operation)
- /* in: HA_EXTRA_RETRIEVE_ALL_COLS or some
- other flag */
+ /* in: HA_EXTRA_FLUSH or some other flag */
{
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
@@ -5978,13 +5976,6 @@ ha_innobase::extra(
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
break;
- case HA_EXTRA_RESET:
- if (prebuilt->blob_heap) {
- row_mysql_prebuilt_free_blob_heap(prebuilt);
- }
- prebuilt->keep_other_fields_on_keyread = 0;
- prebuilt->read_just_key = 0;
- break;
case HA_EXTRA_RESET_STATE:
prebuilt->keep_other_fields_on_keyread = 0;
prebuilt->read_just_key = 0;
@@ -5992,16 +5983,6 @@ ha_innobase::extra(
case HA_EXTRA_NO_KEYREAD:
prebuilt->read_just_key = 0;
break;
- case HA_EXTRA_RETRIEVE_ALL_COLS:
- prebuilt->hint_need_to_fetch_extra_cols
- = ROW_RETRIEVE_ALL_COLS;
- break;
- case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
- if (prebuilt->hint_need_to_fetch_extra_cols == 0) {
- prebuilt->hint_need_to_fetch_extra_cols
- = ROW_RETRIEVE_PRIMARY_KEY;
- }
- break;
case HA_EXTRA_KEYREAD:
prebuilt->read_just_key = 1;
break;
@@ -6015,6 +5996,18 @@ ha_innobase::extra(
return(0);
}
+int ha_innobase::reset()
+{
+ row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ if (prebuilt->blob_heap) {
+ row_mysql_prebuilt_free_blob_heap(prebuilt);
+ }
+ prebuilt->keep_other_fields_on_keyread = 0;
+ prebuilt->read_just_key = 0;
+ return 0;
+}
+
+
/**********************************************************************
MySQL calls this function at the start of each SQL statement inside LOCK
TABLES. Inside LOCK TABLES the ::external_lock method does not work to
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 4f0c9eb151b..c84aff63e81 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -33,6 +33,8 @@ typedef struct st_innobase_share {
} INNOBASE_SHARE;
+struct row_prebuilt_struct;
+
my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
uint full_name_len,
ulonglong *unused);
@@ -89,7 +91,7 @@ class ha_innobase: public handler
const char* table_type() const { return("InnoDB");}
const char *index_type(uint key_number) { return "BTREE"; }
const char** bas_ext() const;
- ulong table_flags() const { return int_table_flags; }
+ ulonglong table_flags() const { return int_table_flags; }
ulong index_flags(uint idx, uint part, bool all_parts) const
{
return (HA_READ_NEXT |
@@ -109,7 +111,6 @@ class ha_innobase: public handler
uint max_supported_key_length() const { return 3500; }
uint max_supported_key_part_length() const;
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
- bool has_transactions() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
int close(void);
@@ -147,20 +148,10 @@ class ha_innobase: public handler
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
int discard_or_import_tablespace(my_bool discard);
int extra(enum ha_extra_function operation);
+ int reset();
int external_lock(THD *thd, int lock_type);
int transactional_table_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
-
- int ha_retrieve_all_cols()
- {
- ha_set_all_bits_in_read_set();
- return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- }
- int ha_retrieve_all_pk()
- {
- ha_set_primary_key_in_read_set();
- return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
- }
void position(byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range
*max_key);
@@ -207,6 +198,8 @@ class ha_innobase: public handler
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
+ void build_template(struct row_prebuilt_struct *prebuilt, THD *thd,
+ TABLE *table, uint templ_type);
};
extern SHOW_VAR innodb_status_variables[];
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 786d45a4966..fcb15a46661 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -52,7 +52,7 @@ TYPELIB myisam_stats_method_typelib= {
** MyISAM tables
*****************************************************************************/
-static handler *myisam_create_handler(TABLE_SHARE *table);
+static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
/* MyISAM handlerton */
@@ -103,9 +103,9 @@ handlerton myisam_hton= {
};
-static handler *myisam_create_handler(TABLE_SHARE *table)
+static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_myisam(table);
+ return new (mem_root) ha_myisam(table);
}
@@ -192,10 +192,11 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
ha_myisam::ha_myisam(TABLE_SHARE *table_arg)
:handler(&myisam_hton, table_arg), file(0),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS),
- can_enable_indexes(1)
+ HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
+ HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT),
+ can_enable_indexes(1)
{}
@@ -1318,7 +1319,7 @@ int ha_myisam::rnd_init(bool scan)
{
if (scan)
return mi_scan_init(file);
- return mi_extra(file, HA_EXTRA_RESET, 0);
+ return mi_reset(file); // Free buffers
}
int ha_myisam::rnd_next(byte *buf)
@@ -1358,24 +1359,23 @@ void ha_myisam::info(uint flag)
(void) mi_status(file,&info,flag);
if (flag & HA_STATUS_VARIABLE)
{
- records = info.records;
- deleted = info.deleted;
- data_file_length=info.data_file_length;
- index_file_length=info.index_file_length;
- delete_length = info.delete_length;
- check_time = info.check_time;
- mean_rec_length=info.mean_reclength;
+ stats.records = info.records;
+ stats.deleted = info.deleted;
+ stats.data_file_length=info.data_file_length;
+ stats.index_file_length=info.index_file_length;
+ stats.delete_length = info.delete_length;
+ stats.check_time = info.check_time;
+ stats.mean_rec_length=info.mean_reclength;
}
if (flag & HA_STATUS_CONST)
{
TABLE_SHARE *share= table->s;
- max_data_file_length= info.max_data_file_length;
- max_index_file_length= info.max_index_file_length;
- create_time= info.create_time;
- sortkey= info.sortkey;
+ stats.max_data_file_length= info.max_data_file_length;
+ stats.max_index_file_length= info.max_index_file_length;
+ stats.create_time= info.create_time;
ref_length= info.reflength;
share->db_options_in_use= info.options;
- block_size= myisam_block_size; /* record block size */
+ stats.block_size= myisam_block_size; /* record block size */
/* Update share */
if (share->tmp_table == NO_TMP_TABLE)
@@ -1406,12 +1406,12 @@ void ha_myisam::info(uint flag)
if (flag & HA_STATUS_ERRKEY)
{
errkey = info.errkey;
- my_store_ptr(dupp_ref, ref_length, info.dupp_key_pos);
+ my_store_ptr(dup_ref, ref_length, info.dupp_key_pos);
}
if (flag & HA_STATUS_TIME)
- update_time = info.update_time;
+ stats.update_time = info.update_time;
if (flag & HA_STATUS_AUTO)
- auto_increment_value= info.auto_increment;
+ stats.auto_increment_value= info.auto_increment;
}
@@ -1422,6 +1422,10 @@ int ha_myisam::extra(enum ha_extra_function operation)
return mi_extra(file, operation, 0);
}
+int ha_myisam::reset(void)
+{
+ return mi_reset(file);
+}
/* To be used with WRITE_CACHE and EXTRA_CACHE */
@@ -1465,7 +1469,7 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
ha_myisam::info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
- create_info->auto_increment_value=auto_increment_value;
+ create_info->auto_increment_value= stats.auto_increment_value;
}
create_info->data_file_name=data_file_name;
create_info->index_file_name=index_file_name;
@@ -1699,7 +1703,7 @@ ulonglong ha_myisam::get_auto_increment()
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
ha_myisam::info(HA_STATUS_AUTO);
- return auto_increment_value;
+ return stats.auto_increment_value;
}
/* it's safe to call the following if bulk_insert isn't on */
@@ -1783,7 +1787,7 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
{
uint options= table->s->db_options_in_use;
- if (info->auto_increment_value != auto_increment_value ||
+ if (info->auto_increment_value != stats.auto_increment_value ||
info->data_file_name != data_file_name ||
info->index_file_name != index_file_name ||
table_changes == IS_EQUAL_NO)
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 86efed27478..0b37f181396 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -48,7 +48,7 @@ class ha_myisam: public handler
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
const char **bas_ext() const;
- ulong table_flags() const { return int_table_flags; }
+ ulonglong table_flags() const { return int_table_flags; }
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
@@ -101,6 +101,7 @@ class ha_myisam: public handler
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
+ int reset(void);
int external_lock(THD *thd, int lock_type);
int delete_all_rows(void);
int disable_indexes(uint mode);
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 1cde37644bc..17c0751e747 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -34,7 +34,8 @@
** MyISAM MERGE tables
*****************************************************************************/
-static handler *myisammrg_create_handler(TABLE_SHARE *table);
+static handler *myisammrg_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
/* MyISAM MERGE handlerton */
@@ -80,9 +81,10 @@ handlerton myisammrg_hton= {
NULL /* release_temporary_latches */
};
-static handler *myisammrg_create_handler(TABLE_SHARE *table)
+static handler *myisammrg_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
{
- return new ha_myisammrg(table);
+ return new (mem_root) ha_myisammrg(table);
}
@@ -134,10 +136,10 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
myrg_extra(file,HA_EXTRA_WAIT_LOCK,0);
- if (table->s->reclength != mean_rec_length && mean_rec_length)
+ if (table->s->reclength != stats.mean_rec_length && stats.mean_rec_length)
{
DBUG_PRINT("error",("reclength: %d mean_rec_length: %d",
- table->s->reclength, mean_rec_length));
+ table->s->reclength, stats.mean_rec_length));
goto err;
}
#if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
@@ -258,11 +260,13 @@ int ha_myisammrg::index_next_same(byte * buf,
return error;
}
+
int ha_myisammrg::rnd_init(bool scan)
{
- return myrg_extra(file,HA_EXTRA_RESET,0);
+ return myrg_reset(file);
}
+
int ha_myisammrg::rnd_next(byte *buf)
{
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
@@ -272,6 +276,7 @@ int ha_myisammrg::rnd_next(byte *buf)
return error;
}
+
int ha_myisammrg::rnd_pos(byte * buf, byte *pos)
{
statistic_increment(table->in_use->status_var.ha_read_rnd_count,
@@ -303,18 +308,18 @@ void ha_myisammrg::info(uint flag)
The following fails if one has not compiled MySQL with -DBIG_TABLES
and one has more than 2^32 rows in the merge tables.
*/
- records = (ha_rows) info.records;
- deleted = (ha_rows) info.deleted;
+ stats.records = (ha_rows) info.records;
+ stats.deleted = (ha_rows) info.deleted;
#if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
if ((info.records >= (ulonglong) 1 << 32) ||
(info.deleted >= (ulonglong) 1 << 32))
table->s->crashed= 1;
#endif
- data_file_length=info.data_file_length;
+ stats.data_file_length=info.data_file_length;
errkey = info.errkey;
table->s->keys_in_use.set_prefix(table->s->keys);
table->s->db_options_in_use= info.options;
- mean_rec_length= info.reclength;
+ stats.mean_rec_length= info.reclength;
/*
The handler::block_size is used all over the code in index scan cost
@@ -332,11 +337,11 @@ void ha_myisammrg::info(uint flag)
TODO: In 5.2 index scan cost calculation will be factored out into a
virtual function in class handler and we'll be able to remove this hack.
*/
- block_size= 0;
+ stats.block_size= 0;
if (file->tables)
- block_size= myisam_block_size / file->tables;
+ stats.block_size= myisam_block_size / file->tables;
- update_time=0;
+ stats.update_time= 0;
#if SIZEOF_OFF_T > 4
ref_length=6; // Should be big enough
#else
@@ -362,6 +367,10 @@ int ha_myisammrg::extra(enum ha_extra_function operation)
return myrg_extra(file,operation,0);
}
+int ha_myisammrg::reset(void)
+{
+ return myrg_reset(file);
+}
/* To be used with WRITE_CACHE, EXTRA_CACHE and BULK_INSERT_BEGIN */
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
index 4327b1c17b9..b67ec3dc204 100644
--- a/sql/ha_myisammrg.h
+++ b/sql/ha_myisammrg.h
@@ -33,9 +33,9 @@ class ha_myisammrg: public handler
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
const char *index_type(uint key_number);
- ulong table_flags() const
+ ulonglong table_flags() const
{
- return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
+ return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_NO_TRANSACTIONS |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE |
HA_NO_COPY_ON_ALTER);
@@ -50,7 +50,7 @@ class ha_myisammrg: public handler
uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
double scan_time()
- { return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
+ { return ulonglong2double(stats.data_file_length) / IO_SIZE + file->tables; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
@@ -73,6 +73,7 @@ class ha_myisammrg: public handler
void position(const byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
void info(uint);
+ int reset(void);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int external_lock(THD *thd, int lock_type);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index b84350fd3b8..119171d1229 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -84,9 +84,10 @@ handlerton ndbcluster_hton = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
-static handler *ndbcluster_create_handler(TABLE_SHARE *table)
+static handler *ndbcluster_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
{
- return new ha_ndbcluster(table);
+ return new (mem_root) ha_ndbcluster(table);
}
static uint ndbcluster_partition_flags()
@@ -437,9 +438,10 @@ void ha_ndbcluster::records_update()
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
- if (ndb_get_table_statistics(ndb, m_table, &stat) == 0){
- mean_rec_length= stat.row_size;
- data_file_length= stat.fragment_memory;
+ if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+ {
+ stats.mean_rec_length= stat.row_size;
+ stats.data_file_length= stat.fragment_memory;
info->records= stat.row_count;
}
}
@@ -448,7 +450,7 @@ void ha_ndbcluster::records_update()
if (get_thd_ndb(thd)->error)
info->no_uncommitted_rows_count= 0;
}
- records= info->records+ info->no_uncommitted_rows_count;
+ stats.records= info->records+ info->no_uncommitted_rows_count;
DBUG_VOID_RETURN;
}
@@ -896,23 +898,24 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op)
/*
Check if any set or get of blob value in current query.
*/
+
bool ha_ndbcluster::uses_blob_value()
{
+ uint blob_fields;
+ MY_BITMAP *bitmap;
+ uint *blob_index, *blob_index_end;
if (table_share->blob_fields == 0)
return FALSE;
+
+ bitmap= m_write_op ? table->write_set : table->read_set;
+ blob_index= table_share->blob_field;
+ blob_index_end= blob_index + table_share->blob_fields;
+ do
{
- uint no_fields= table_share->fields;
- int i;
- // They always put blobs at the end..
- for (i= no_fields - 1; i >= 0; i--)
- {
- if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
- (!m_write_op && ha_get_bit_in_read_set(i+1)))
- {
- return TRUE;
- }
- }
- }
+ if (bitmap_is_set(bitmap,
+ table->field[*blob_index]->field_index))
+ return TRUE;
+ } while (++blob_index != blob_index_end);
return FALSE;
}
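
The do/while above visits only the blob columns via table_share->blob_field, an array of field indexes, rather than scanning every field as the deleted loop did. Written as an equivalent for loop for clarity (a sketch; 'bitmap' is the write set for write operations and the read set otherwise):

    for (uint *idx= table_share->blob_field,
         *end= idx + table_share->blob_fields; idx != end; idx++)
    {
      if (bitmap_is_set(bitmap, table->field[*idx]->field_index))
        return TRUE;
    }
    return FALSE;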
@@ -1409,10 +1412,9 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
if (type >= TL_WRITE_ALLOW_WRITE)
return NdbOperation::LM_Exclusive;
- else if (uses_blob_value())
+ if (uses_blob_value())
return NdbOperation::LM_Read;
- else
- return NdbOperation::LM_CommittedRead;
+ return NdbOperation::LM_CommittedRead;
}
static const ulong index_type_flags[]=
@@ -1587,13 +1589,13 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
- if (ha_get_bit_in_read_set(i+1) ||
+ if (bitmap_is_set(table->read_set, i) ||
((field->flags & PRI_KEY_FLAG)))
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
- else
+ else
{
m_value[i].ptr= NULL;
}
@@ -1697,7 +1699,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
DBUG_ENTER("complemented_read");
m_write_op= FALSE;
- if (ha_get_all_bit_in_read_set())
+ if (bitmap_is_set_all(table->read_set))
{
// We have already retrieved all fields, nothing to complement
DBUG_RETURN(0);
@@ -1728,7 +1730,8 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (ha_get_bit_in_read_set(i+1))))
+ bitmap_is_set(table->read_set, i)) &&
+ !bitmap_is_set(table->write_set, i))
{
if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
@@ -1752,7 +1755,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (ha_get_bit_in_read_set(i+1))))
+ bitmap_is_set(table->read_set, i)))
{
m_value[i].ptr= NULL;
}
@@ -1853,11 +1856,11 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record)
uint32 part_id;
int error;
longlong func_value;
- if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
- &func_value)))
- {
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (error)
DBUG_RETURN(error);
- }
op->setPartitionId(part_id);
}
}
@@ -2449,11 +2452,11 @@ int ha_ndbcluster::write_row(byte *record)
{
uint32 part_id;
int error;
- if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
- &func_value)))
- {
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (error)
DBUG_RETURN(error);
- }
op->setPartitionId(part_id);
}
@@ -2475,25 +2478,27 @@ int ha_ndbcluster::write_row(byte *record)
}
else
{
- int res;
-
- if ((res= set_primary_key_from_record(op, record)))
- return res;
+ int error;
+ if ((error= set_primary_key_from_record(op, record)))
+ DBUG_RETURN(error);
}
// Set non-key attribute(s)
bool set_blob_value= FALSE;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
- (ha_get_bit_in_write_set(i + 1) || !m_use_write) &&
+ (bitmap_is_set(table->write_set, i) || !m_use_write) &&
set_ndb_value(op, field, i, record-table->record[0], &set_blob_value))
{
m_skip_auto_increment= TRUE;
+ dbug_tmp_restore_column_map(table->read_set, old_map);
ERR_RETURN(op->getNdbError());
}
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
if (m_use_partition_function)
{
@@ -2571,6 +2576,7 @@ int ha_ndbcluster::write_row(byte *record)
}
m_skip_auto_increment= TRUE;
+ DBUG_PRINT("exit",("ok"));
DBUG_RETURN(0);
}
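
Several hunks above bracket partition-id computation and attribute marshalling with dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map(): the column map is temporarily widened and then restored, including on the error paths. A compact sketch of the idiom under simplified stand-in types:

#include <bitset>

// Sketch only: ColumnMap stands in for MY_BITMAP and partition_id_for()
// for m_part_info->get_partition_id().
using ColumnMap= std::bitset<32>;

static ColumnMap tmp_use_all_columns(ColumnMap *map)
{
  ColumnMap old= *map;                  // remember the statement's map
  map->set();                           // pretend all columns are available
  return old;
}

static void tmp_restore_column_map(ColumnMap *map, const ColumnMap &old)
{
  *map= old;                            // always undo, error paths included
}

static int partition_id_for(const ColumnMap &) { return 0; } // hypothetical

int compute_part_id(ColumnMap *read_set)
{
  ColumnMap old= tmp_use_all_columns(read_set);
  int part_id= partition_id_for(*read_set);   // may touch any column
  tmp_restore_column_map(read_set, old);
  return part_id;
}

int main()
{
  ColumnMap read_set;
  read_set.set(1);                      // statement marked only column 1
  return compute_part_id(&read_set);
}
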
@@ -2630,7 +2636,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
{
table->timestamp_field->set_time();
- ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+ bitmap_set_bit(table->write_set, table->timestamp_field->field_index);
}
if (m_use_partition_function &&
@@ -2744,14 +2750,19 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
m_rows_changed++;
// Set non-key attribute(s)
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
- if (ha_get_bit_in_write_set(i+1) &&
+ if (bitmap_is_set(table->write_set, i) &&
(!(field->flags & PRI_KEY_FLAG)) &&
set_ndb_value(op, field, i, new_data - table->record[0]))
+ {
+ dbug_tmp_restore_column_map(table->read_set, old_map);
ERR_RETURN(op->getNdbError());
+ }
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
if (m_use_partition_function)
{
@@ -2843,9 +2854,8 @@ int ha_ndbcluster::delete_row(const byte *record)
}
else
{
- int res;
- if ((res= set_primary_key_from_record(op, record)))
- return res;
+ if ((error= set_primary_key_from_record(op, record)))
+ DBUG_RETURN(error);
}
}
@@ -2876,6 +2886,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
{
Field **p_field= table->field, *field= *p_field;
uint row_offset= (uint) (buf - table->record[0]);
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
DBUG_ENTER("ndb_unpack_record");
// Set null flag(s)
@@ -2926,13 +2937,13 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
field->ptr= save_field_ptr;
DBUG_PRINT("info",("[%u] SET",
(*value).rec->getColumn()->getColumnNo()));
- DBUG_DUMP("info", (const char*) field->ptr, field->field_length);
+ DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
}
else
{
DBUG_PRINT("info",("[%u] SET",
(*value).rec->getColumn()->getColumnNo()));
- DBUG_DUMP("info", (const char*) field->ptr, field->field_length);
+ DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
}
}
else
@@ -2965,6 +2976,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
}
}
}
+ dbug_tmp_restore_column_map(table->write_set, old_map);
DBUG_VOID_RETURN;
}
@@ -3487,7 +3499,7 @@ void ha_ndbcluster::info(uint flag)
if (m_table_info)
{
if (m_ha_not_exact_count)
- records= 100;
+ stats.records= 100;
else
records_update();
}
@@ -3501,14 +3513,14 @@ void ha_ndbcluster::info(uint flag)
if (current_thd->variables.ndb_use_exact_count &&
ndb_get_table_statistics(ndb, m_table, &stat) == 0)
{
- mean_rec_length= stat.row_size;
- data_file_length= stat.fragment_memory;
- records= stat.row_count;
+ stats.mean_rec_length= stat.row_size;
+ stats.data_file_length= stat.fragment_memory;
+ stats.records= stat.row_count;
}
else
{
- mean_rec_length= 0;
- records= 100;
+ stats.mean_rec_length= 0;
+ stats.records= 100;
}
}
}
@@ -3529,8 +3541,7 @@ void ha_ndbcluster::info(uint flag)
{
Ndb *ndb= get_ndb();
- auto_increment_value=
- ndb->readAutoIncrementValue(m_table);
+ stats.auto_increment_value= ndb->readAutoIncrementValue(m_table);
}
}
DBUG_VOID_RETURN;
@@ -3554,18 +3565,6 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
{
DBUG_ENTER("extra");
switch (operation) {
- case HA_EXTRA_RESET: /* Reset database to after open */
- DBUG_PRINT("info", ("HA_EXTRA_RESET"));
- DBUG_PRINT("info", ("Clearing condition stack"));
- cond_clear();
- /*
- * Regular partition pruning will set the bitmap appropriately.
- * Some queries like ALTER TABLE doesn't use partition pruning and
- * thus the 'used_partitions' bitmap needs to be initialized
- */
- if (m_part_info)
- bitmap_set_all(&m_part_info->used_partitions);
- break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
@@ -3601,6 +3600,22 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_RETURN(0);
}
+
+int ha_ndbcluster::reset()
+{
+ DBUG_ENTER("ha_ndbcluster::reset");
+ cond_clear();
+ /*
+ Regular partition pruning will set the bitmap appropriately.
+ Some queries, like ALTER TABLE, don't use partition pruning and
+ thus the 'used_partitions' bitmap needs to be initialized
+ */
+ if (m_part_info)
+ bitmap_set_all(&m_part_info->used_partitions);
+ DBUG_RETURN(0);
+}
+
+
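
The HA_EXTRA_RESET case removed above becomes this dedicated reset() method. A hedged sketch of the shape of such a hook, using toy classes rather than the real handler hierarchy:

#include <cstdio>

// Sketch only: Handler and NdbLikeHandler are toy types, not the
// server's classes.
struct Handler
{
  virtual ~Handler() {}
  virtual int reset() { return 0; }     // default: nothing to clear
};

struct NdbLikeHandler : Handler
{
  int pushed_conditions;                // pretend conditions are pushed
  NdbLikeHandler() : pushed_conditions(2) {}
  int reset()
  {
    pushed_conditions= 0;               // the cond_clear() equivalent
    std::puts("condition stack cleared");
    return 0;
  }
};

int main()
{
  NdbLikeHandler h;
  // The server now calls reset() at the end of each statement instead
  // of routing the cleanup through extra(HA_EXTRA_RESET).
  return h.reset();
}
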
/*
Start of an insert, remember number of rows to be inserted, it will
be used in write_row and get_autoincrement to send an optimal number
@@ -3717,7 +3732,7 @@ const char** ha_ndbcluster::bas_ext() const
double ha_ndbcluster::scan_time()
{
DBUG_ENTER("ha_ndbcluster::scan_time()");
- double res= rows2double(records*1000);
+ double res= rows2double(stats.records*1000);
DBUG_PRINT("exit", ("table: %s value: %f",
m_tabname, res));
DBUG_RETURN(res);
@@ -3801,8 +3816,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
{
int error=0;
NdbTransaction* trans= NULL;
-
DBUG_ENTER("external_lock");
+
/*
Check that this handler instance has a connection
set up to the Ndb object of thd
@@ -3813,9 +3828,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
- DBUG_PRINT("enter", ("this: %x thd: %lx thd_ndb: %lx "
+ DBUG_PRINT("enter", ("this: 0x%lx thd: 0x%lx thd_ndb: %lx "
"thd_ndb->lock_count: %d",
- this, thd, thd_ndb, thd_ndb->lock_count));
+ (long) this, (long) thd, (long) thd_ndb,
+ thd_ndb->lock_count));
if (lock_type != F_UNLCK)
{
@@ -5056,7 +5072,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
/* Drop the table from NDB */
- int res;
+ int res= 0;
if (h && h->m_table)
{
if (dict->dropTableGlobal(*h->m_table))
@@ -5084,7 +5100,6 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
ndb_table_id= ndbtab_g.get_table()->getObjectId();
ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
#endif
- res= 0;
}
else if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT)
{
@@ -5252,7 +5267,9 @@ ulonglong ha_ndbcluster::get_auto_increment()
HA_NEED_READ_RANGE_BUFFER | \
HA_CAN_GEOMETRY | \
HA_CAN_BIT_FIELD | \
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
+ HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \
+ HA_PARTIAL_COLUMN_READ
ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
handler(&ndbcluster_hton, table_arg),
@@ -5295,8 +5312,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
m_tabname[0]= '\0';
m_dbname[0]= '\0';
- records= ~(ha_rows)0; // uninitialized
- block_size= 1024;
+ stats.records= ~(ha_rows)0; // uninitialized
+ stats.block_size= 1024;
for (i= 0; i < MAX_KEY; i++)
ndb_init_index(m_index[i]);
@@ -5620,7 +5637,7 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db,
{
Ndb* ndb;
DBUG_ENTER("ndbcluster_table_exists_in_engine");
- DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+ DBUG_PRINT("enter", ("db: %s name: %s", db, name));
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -5629,14 +5646,13 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db,
NdbDictionary::Dictionary::List list;
if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
- for (int i= 0 ; i < list.count ; i++)
+ for (uint i= 0 ; i < list.count ; i++)
{
NdbDictionary::Dictionary::List::Element& elmt= list.elements[i];
if (my_strcasecmp(system_charset_info, elmt.database, db))
continue;
if (my_strcasecmp(system_charset_info, elmt.name, name))
continue;
- // table found
DBUG_PRINT("info", ("Found table"));
DBUG_RETURN(1);
}
@@ -5766,6 +5782,8 @@ int ndbcluster_find_all_files(THD *thd)
NDBDICT *dict= ndb->getDictionary();
int unhandled, retries= 5, skipped;
+ LINT_INIT(unhandled);
+ LINT_INIT(skipped);
do
{
NdbDictionary::Dictionary::List list;
@@ -6268,9 +6286,11 @@ void ha_ndbcluster::print_error(int error, myf errflag)
if (error == HA_ERR_NO_PARTITION_FOUND)
{
char buf[100];
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
m_part_info->part_expr->null_value ? "NULL" :
llstr(m_part_info->part_expr->val_int(), buf));
+ dbug_tmp_restore_column_map(table->read_set, old_map);
}
else
handler::print_error(error, errflag);
@@ -6488,12 +6508,11 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
DBUG_RETURN(10); /* Good guess when you don't know anything */
}
-ulong ha_ndbcluster::table_flags(void) const
+ulonglong ha_ndbcluster::table_flags(void) const
{
if (m_ha_not_exact_count)
- return m_table_flags | HA_NOT_EXACT_COUNT;
- else
- return m_table_flags;
+ return m_table_flags & ~HA_STATS_RECORDS_IS_EXACT;
+ return m_table_flags;
}
const char * ha_ndbcluster::table_type() const
{
@@ -6527,10 +6546,6 @@ bool ha_ndbcluster::low_byte_first() const
return TRUE;
#endif
}
-bool ha_ndbcluster::has_transactions()
-{
- return TRUE;
-}
const char* ha_ndbcluster::index_type(uint key_number)
{
switch (get_index_type(key_number)) {
@@ -9585,7 +9600,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
for (i= 0; i < part_info->part_field_list.elements; i++)
{
- NDBCOL *col= tab->getColumn(fields[i]->fieldnr - 1);
+ NDBCOL *col= tab->getColumn(fields[i]->field_index);
DBUG_PRINT("info",("setting dist key on %s", col->getName()));
col->setPartitionKey(TRUE);
}
@@ -9675,7 +9690,7 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
{
Field *field= table->field[i];
const NDBCOL *col= tab->getColumn(field->field_name);
- if (field->add_index &&
+ if ((field->flags & FIELD_IN_ADD_INDEX) &&
col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
{
DBUG_PRINT("info", ("add/drop index not supported for disk stored column"));
@@ -9951,10 +9966,11 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
const NDBTAB *tab;
int err;
DBUG_ENTER("ha_ndbcluster::get_no_parts");
+ LINT_INIT(err);
set_dbname(name);
set_tabname(name);
- do
+ for (;;)
{
if (check_ndb_connection())
{
@@ -9968,22 +9984,21 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
ERR_BREAK(dict->getNdbError(), err);
*no_parts= ndbtab_g.get_table()->getFragmentCount();
DBUG_RETURN(FALSE);
- } while (1);
+ }
-end:
print_error(err, MYF(0));
DBUG_RETURN(TRUE);
}
-static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
+static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables,
+ COND *cond)
{
TABLE* table= tables->table;
Ndb *ndb= check_ndb_in_thd(thd);
NdbDictionary::Dictionary* dict= ndb->getDictionary();
NdbDictionary::Dictionary::List dflist;
NdbError ndberr;
- unsigned i;
-
+ uint i;
DBUG_ENTER("ndbcluster_fill_files_table");
dict->listObjects(dflist, NdbDictionary::Object::Datafile);
@@ -9995,12 +10010,13 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
{
NdbDictionary::Dictionary::List::Element& elt = dflist.elements[i];
Ndb_cluster_connection_node_iter iter;
- unsigned id;
-
+ uint id;
+
g_ndb_cluster_connection->init_get_next_node(iter);
while ((id= g_ndb_cluster_connection->get_next_node(iter)))
{
+ uint c= 0;
NdbDictionary::Datafile df= dict->getDatafile(id, elt.name);
ndberr= dict->getNdbError();
if(ndberr.classification != NdbError::NoError)
@@ -10018,7 +10034,6 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
ERR_RETURN(ndberr);
}
- int c= 0;
table->field[c++]->set_null(); // FILE_ID
table->field[c++]->store(elt.name, strlen(elt.name),
system_charset_info);
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index f407cb0090f..aa6ac2024fc 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -335,7 +335,12 @@ class Ndb_item {
const Item *item= value.item;
if (item && field)
- ((Item *)item)->save_in_field(field, false);
+ {
+ my_bitmap_map *old_map=
+ dbug_tmp_use_all_columns(field->table, field->table->write_set);
+ ((Item *)item)->save_in_field(field, FALSE);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ }
};
static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)
@@ -606,12 +611,13 @@ class ha_ndbcluster: public handler
void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
+ int reset();
int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
void print_error(int error, myf errflag);
const char * table_type() const;
const char ** bas_ext() const;
- ulong table_flags(void) const;
+ ulonglong table_flags(void) const;
void prepare_for_alter();
int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);
@@ -638,7 +644,6 @@ class ha_ndbcluster: public handler
enum thr_lock_type lock_type);
bool low_byte_first() const;
- bool has_transactions();
virtual bool is_injective() const { return true; }
@@ -676,7 +681,7 @@ static void set_tabname(const char *pathname, char *tabname);
AND ... AND pushed_condN)
or less restrictive condition, depending on handler's capabilities.
- handler->extra(HA_EXTRA_RESET) call empties the condition stack.
+ A handler->reset() call empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
condition stack.
The current implementation supports arbitrary AND/OR nested conditions
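
As the comment describes, pushed conditions accumulate and are ANDed together until handler->reset() empties the stack. A self-contained illustration of that contract, with a hypothetical Condition type in place of the server's Item trees:

#include <functional>
#include <vector>

// Sketch only: Condition stands in for the server's Item trees;
// matches() models the engine-side filtering.
using Row= int;
using Condition= std::function<bool(const Row&)>;

struct CondStack
{
  std::vector<Condition> conds;
  void push(Condition c) { conds.push_back(c); }  // cond_push()
  void clear() { conds.clear(); }                 // what reset() triggers
  bool matches(const Row &row) const              // AND of all pushed conds
  {
    for (const Condition &cond : conds)
      if (!cond(row))
        return false;
    return true;
  }
};

int main()
{
  CondStack stack;
  stack.push([](const Row &r) { return r > 0; });
  stack.push([](const Row &r) { return r < 10; });
  bool ok= stack.matches(5);   // true: both pushed conditions hold
  stack.clear();               // condition stack emptied, as after reset()
  return ok ? 0 : 1;
}
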
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 922380f47f9..47fe2796aac 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -263,6 +263,14 @@ ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
DBUG_VOID_RETURN;
}
+
+/*
+ Creates a TABLE object for the ndb cluster table
+
+ NOTES
+ This does not open the underlying table
+*/
+
static int
ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
TABLE_SHARE *table_share, TABLE *table)
@@ -310,6 +318,8 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
share->table_share= table_share;
DBUG_ASSERT(share->table == 0);
share->table= table;
+ /* We can't use 'use_all_columns()' as the file object is not set up yet */
+ table->column_bitmaps_set_no_signal(&table->s->all_set, &table->s->all_set);
#ifndef DBUG_OFF
dbug_print_table("table", table);
#endif
@@ -343,7 +353,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
{
bitmap_init(&share->subscriber_bitmap[i],
(Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
- max_ndb_nodes, false);
+ max_ndb_nodes, FALSE);
bitmap_clear_all(&share->subscriber_bitmap[i]);
}
}
@@ -867,6 +877,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
/* unpack blob values */
byte* blobs_buffer= 0;
uint blobs_buffer_size= 0;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
{
ptrdiff_t ptrdiff= 0;
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
@@ -876,7 +887,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
{
my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
DBUG_PRINT("info", ("blob read error"));
- DBUG_ASSERT(false);
+ DBUG_ASSERT(FALSE);
}
}
/* db varchar 1 length byte */
@@ -928,6 +939,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
s->type= ((Field_long *)*field)->val_int();
/* free blobs buffer */
my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ dbug_tmp_restore_column_map(table->read_set, old_map);
}
/*
@@ -1073,7 +1085,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
{
int i, updated= 0;
int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
- bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, false);
+ bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE);
bitmap_set_all(&schema_subscribers);
(void) pthread_mutex_lock(&schema_share->mutex);
for (i= 0; i < no_storage_nodes; i++)
@@ -1333,7 +1345,7 @@ ndbcluster_update_slock(THD *thd,
MY_BITMAP slock;
uint32 bitbuf[SCHEMA_SLOCK_SIZE/4];
- bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, false);
+ bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, FALSE);
if (ndbtab == 0)
{
@@ -1655,7 +1667,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
Cluster_schema *schema= (Cluster_schema *)
sql_alloc(sizeof(Cluster_schema));
MY_BITMAP slock;
- bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, false);
+ bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE);
uint node_id= g_ndb_cluster_connection->node_id();
ndbcluster_get_schema(tmp_share, schema);
if (schema->node_id != node_id)
@@ -1874,7 +1886,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
{
char key[FN_REFLEN];
build_table_filename(key, sizeof(key), schema->db, schema->name, "");
- NDB_SHARE *share= get_share(key, 0, false, false);
+ NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
switch (schema_type)
{
@@ -1935,7 +1947,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
}
break;
default:
- DBUG_ASSERT(false);
+ DBUG_ASSERT(FALSE);
}
if (share)
{
@@ -2012,18 +2024,20 @@ static int open_binlog_index(THD *thd, TABLE_LIST *tables,
}
*binlog_index= tables->table;
thd->proc_info= save_proc_info;
+ (*binlog_index)->use_all_columns();
return 0;
}
+
/*
Insert one row in the binlog_index
*/
+
int ndb_add_binlog_index(THD *thd, void *_row)
{
Binlog_index_row &row= *(Binlog_index_row *) _row;
int error= 0;
bool need_reopen;
-
/*
Turn off binlogging to prevent the table changes from being written to
the binary log.
@@ -2065,10 +2079,9 @@ int ndb_add_binlog_index(THD *thd, void *_row)
binlog_index->field[5]->store(row.n_deletes);
binlog_index->field[6]->store(row.n_schemaops);
- int r;
- if ((r= binlog_index->file->ha_write_row(binlog_index->record[0])))
+ if ((error= binlog_index->file->ha_write_row(binlog_index->record[0])))
{
- sql_print_error("NDB Binlog: Writing row to binlog_index: %d", r);
+ sql_print_error("NDB Binlog: Writing row to binlog_index: %d", error);
error= -1;
goto add_binlog_index_err;
}
@@ -2195,7 +2208,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
}
/* Create share which is needed to hold replication information */
- if (!(share= get_share(key, 0, true, true)))
+ if (!(share= get_share(key, 0, TRUE, TRUE)))
{
sql_print_error("NDB Binlog: "
"allocating table share for %s failed", key);
@@ -2345,7 +2358,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
}
}
if (share->flags & NSF_BLOB_FLAG)
- my_event.mergeEvents(true);
+ my_event.mergeEvents(TRUE);
/* add all columns to the event */
int n_cols= ndbtab->getNoOfColumns();
@@ -2532,7 +2545,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
if (share->flags & NSF_BLOB_FLAG)
- op->mergeEvents(true); // currently not inherited from event
+ op->mergeEvents(TRUE); // currently not inherited from event
DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x",
share->ndb_value[0]));
@@ -2679,7 +2692,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
share->op->getState() == NdbEventOperation::EO_EXECUTING &&
dict->getNdbError().code != 4009)
{
- DBUG_ASSERT(false);
+ DBUG_ASSERT(FALSE);
DBUG_RETURN(-1);
}
}
@@ -2800,7 +2813,7 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
/* make sure to flush any pending events as they can be dependent
on one of the tables being changed below
*/
- thd->binlog_flush_pending_rows_event(true);
+ thd->binlog_flush_pending_rows_event(TRUE);
switch (type)
{
@@ -2885,7 +2898,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
/* Potential buffer for the bitmap */
uint32 bitbuf[128 / (sizeof(uint32) * 8)];
bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL,
- n_fields, false);
+ n_fields, FALSE);
bitmap_set_all(&b);
/*
@@ -2918,7 +2931,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
}
ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
int ret= trans.write_row(::server_id,
- injector::transaction::table(table, true),
+ injector::transaction::table(table, TRUE),
&b, n_fields, table->record[0]);
DBUG_ASSERT(ret == 0);
}
@@ -2956,7 +2969,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
DBUG_EXECUTE("info", print_records(table, table->record[n]););
int ret= trans.delete_row(::server_id,
- injector::transaction::table(table, true),
+ injector::transaction::table(table, TRUE),
&b, n_fields, table->record[n]);
DBUG_ASSERT(ret == 0);
}
@@ -2983,7 +2996,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
since table has a primary key, we can do a write
using only after values
*/
- trans.write_row(::server_id, injector::transaction::table(table, true),
+ trans.write_row(::server_id, injector::transaction::table(table, TRUE),
&b, n_fields, table->record[0]);// after values
}
else
@@ -3003,7 +3016,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
DBUG_EXECUTE("info", print_records(table, table->record[1]););
int ret= trans.update_row(::server_id,
- injector::transaction::table(table, true),
+ injector::transaction::table(table, TRUE),
&b, n_fields,
table->record[1], // before values
table->record[0]);// after values
@@ -3095,7 +3108,7 @@ static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
}
pthread_mutex_init(&ndb_schema_object->mutex, MY_MUTEX_INIT_FAST);
bitmap_init(&ndb_schema_object->slock_bitmap, ndb_schema_object->slock,
- sizeof(ndb_schema_object->slock)*8, false);
+ sizeof(ndb_schema_object->slock)*8, FALSE);
bitmap_clear_all(&ndb_schema_object->slock_bitmap);
break;
}
@@ -3434,7 +3447,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
inj->new_trans(thd, &trans);
}
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
- injector::transaction::table tbl(table, true);
+ injector::transaction::table tbl(table, TRUE);
int ret= trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
}
@@ -3447,20 +3460,14 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
const LEX_STRING& name=table->s->table_name;
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
- injector::transaction::table tbl(table, true);
+ injector::transaction::table tbl(table, TRUE);
int ret= trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
-
- MY_BITMAP b;
- uint32 bitbuf;
- DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8);
- bitmap_init(&b, &bitbuf, table->s->fields, false);
- bitmap_set_all(&b);
table->field[0]->store((longlong)::server_id);
table->field[1]->store((longlong)gci);
trans.write_row(::server_id,
- injector::transaction::table(table, true),
- &b, table->s->fields,
+ injector::transaction::table(table, TRUE),
+ &table->s->all_set, table->s->fields,
table->record[0]);
}
else
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index b1a5a447b6f..1d12d1967d3 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -69,7 +69,8 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
MODULE create/delete handler object
****************************************************************************/
-static handler *partition_create_handler(TABLE_SHARE *share);
+static handler *partition_create_handler(TABLE_SHARE *share,
+ MEM_ROOT *mem_root);
static uint partition_flags();
static uint alter_table_flags(uint flags);
@@ -125,9 +126,16 @@ handlerton partition_hton = {
New partition object
*/
-static handler *partition_create_handler(TABLE_SHARE *share)
+static handler *partition_create_handler(TABLE_SHARE *share,
+ MEM_ROOT *mem_root)
{
- return new ha_partition(share);
+ ha_partition *file= new (mem_root) ha_partition(share);
+ if (file && file->initialise_partition(mem_root))
+ {
+ delete file;
+ file= 0;
+ }
+ return file;
}
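
partition_create_handler() now allocates the object on the caller's MEM_ROOT and tears it down if the second initialisation phase fails. A sketch of that create-then-initialise pattern with stand-in types (a real MEM_ROOT frees its blocks in bulk, which plain malloc below does not model):

#include <cstdlib>
#include <new>

// Sketch only: Arena and PartitionHandler are hypothetical stand-ins.
struct Arena
{
  void *alloc(std::size_t n) { return std::malloc(n); } // real: alloc_root()
};

struct PartitionHandler
{
  bool initialise(Arena *) { return false; }  // FALSE means success, as above
};

PartitionHandler *create_partition_handler(Arena *arena)
{
  void *mem= arena->alloc(sizeof(PartitionHandler));
  PartitionHandler *file= mem ? new (mem) PartitionHandler() : 0;
  if (file && file->initialise(arena))
  {
    // Second phase failed: destroy the object. The arena block itself
    // is reclaimed when the whole arena is freed (not modelled here).
    file->~PartitionHandler();
    file= 0;
  }
  return file;
}

int main()
{
  Arena arena;
  return create_partition_handler(&arena) ? 0 : 1;
}
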
/*
@@ -229,7 +237,6 @@ void ha_partition::init_handler_variables()
m_reorged_parts= 0;
m_added_file= NULL;
m_tot_parts= 0;
- m_has_transactions= 0;
m_pkey_is_clustered= 0;
m_lock_type= F_UNLCK;
m_part_spec.start_part= NO_CURRENT_PART_ID;
@@ -301,7 +308,8 @@ ha_partition::~ha_partition()
Initialise partition handler object
SYNOPSIS
- ha_initialise()
+ initialise_partition()
+ mem_root Allocate memory through this
RETURN VALUE
1 Error
@@ -341,16 +349,16 @@ ha_partition::~ha_partition()
*/
-int ha_partition::ha_initialise()
+bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
{
handler **file_array, *file;
- DBUG_ENTER("ha_partition::ha_initialise");
+ DBUG_ENTER("ha_partition::initialise_partition");
if (m_create_handler)
{
m_tot_parts= m_part_info->get_tot_partitions();
DBUG_ASSERT(m_tot_parts > 0);
- if (new_handlers_from_part_info())
+ if (new_handlers_from_part_info(mem_root))
DBUG_RETURN(1);
}
else if (!table_share || !table_share->normalized_path.str)
@@ -363,7 +371,7 @@ int ha_partition::ha_initialise()
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
}
- else if (get_from_handler_file(table_share->normalized_path.str))
+ else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
{
mem_alloc_error(2);
DBUG_RETURN(1);
@@ -377,12 +385,11 @@ int ha_partition::ha_initialise()
other parameters are calculated on demand.
HA_FILE_BASED is always set for partition handler since we use a
special file for handling names of partitions, engine types.
- HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPP_POS,
+ HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= m_file[0]->table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
- m_has_transactions= TRUE;
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
@@ -394,13 +401,11 @@ int ha_partition::ha_initialise()
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
- if (!file->has_transactions())
- m_has_transactions= FALSE;
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
} while (*(++file_array));
- m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPP_POS |
+ m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
@@ -1388,9 +1393,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
uint j= 0;
do
{
- if (!(new_file_array[part_count++]= get_new_handler(table->s,
- thd->mem_root,
- part_elem->engine_type)))
+ if (!(new_file_array[part_count++]=
+ get_new_handler(table->s,
+ thd->mem_root,
+ part_elem->engine_type)))
{
mem_alloc_error(sizeof(handler));
DBUG_RETURN(ER_OUTOFMEMORY);
@@ -1644,7 +1650,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
handler **file;
DBUG_ENTER("del_ren_cre_table()");
- if (get_from_handler_file(from))
+ if (get_from_handler_file(from, current_thd->mem_root))
DBUG_RETURN(TRUE);
DBUG_ASSERT(m_file_buffer);
name_buffer_ptr= m_name_buffer_ptr;
@@ -1953,7 +1959,6 @@ void ha_partition::clear_handler_file()
my_free((char*) m_file_buffer, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*) m_engine_array, MYF(MY_ALLOW_ZERO_PTR));
m_file_buffer= NULL;
- m_name_buffer_ptr= NULL;
m_engine_array= NULL;
}
@@ -1962,29 +1967,29 @@ void ha_partition::clear_handler_file()
SYNOPSIS
create_handlers()
+ mem_root Allocate memory through this
RETURN VALUE
TRUE Error
FALSE Success
*/
-bool ha_partition::create_handlers()
+bool ha_partition::create_handlers(MEM_ROOT *mem_root)
{
uint i;
uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
DBUG_ENTER("create_handlers");
- if (!(m_file= (handler **) sql_alloc(alloc_len)))
+ if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
DBUG_RETURN(TRUE);
- bzero(m_file, alloc_len);
+ bzero((char*) m_file, alloc_len);
for (i= 0; i < m_tot_parts; i++)
{
- if (!(m_file[i]= get_new_handler(table_share, current_thd->mem_root,
+ if (!(m_file[i]= get_new_handler(table_share, mem_root,
m_engine_array[i])))
DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
}
- m_file[m_tot_parts]= 0;
/* For the moment we only support partition over the same table engine */
if (m_engine_array[0] == &myisam_hton)
{
@@ -2005,13 +2010,14 @@ bool ha_partition::create_handlers()
SYNOPSIS
new_handlers_from_part_info()
+ mem_root Allocate memory through this
RETURN VALUE
TRUE Error
FALSE Success
*/
-bool ha_partition::new_handlers_from_part_info()
+bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
{
uint i, j, part_count;
partition_element *part_elem;
@@ -2020,12 +2026,12 @@ bool ha_partition::new_handlers_from_part_info()
THD *thd= current_thd;
DBUG_ENTER("ha_partition::new_handlers_from_part_info");
- if (!(m_file= (handler **) sql_alloc(alloc_len)))
+ if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
{
mem_alloc_error(alloc_len);
goto error_end;
}
- bzero(m_file, alloc_len);
+ bzero((char*) m_file, alloc_len);
DBUG_ASSERT(m_part_info->no_parts > 0);
i= 0;
@@ -2041,8 +2047,8 @@ bool ha_partition::new_handlers_from_part_info()
{
for (j= 0; j < m_part_info->no_subparts; j++)
{
- if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
- part_elem->engine_type)))
+ if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
+ part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u",
(uint) ha_legacy_type(part_elem->engine_type)));
@@ -2050,7 +2056,7 @@ bool ha_partition::new_handlers_from_part_info()
}
else
{
- if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root,
+ if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u",
@@ -2076,6 +2082,7 @@ error_end:
SYNOPSIS
get_from_handler_file()
name Full path of table name
+ mem_root Allocate memory through this
RETURN VALUE
TRUE Error
@@ -2086,7 +2093,7 @@ error_end:
partitions.
*/
-bool ha_partition::get_from_handler_file(const char *name)
+bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
{
char buff[FN_REFLEN], *address_tot_name_len;
File file;
@@ -2125,7 +2132,8 @@ bool ha_partition::get_from_handler_file(const char *name)
goto err2;
for (i= 0; i < m_tot_parts; i++)
engine_array[i]= ha_resolve_by_legacy_type(current_thd,
- (enum legacy_db_type) *(uchar *) ((file_buffer) + 12 + i));
+ (enum legacy_db_type)
+ *(uchar *) ((file_buffer) + 12 + i));
address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
if (len_words != (tot_partition_words + tot_name_words + 4))
@@ -2135,7 +2143,7 @@ bool ha_partition::get_from_handler_file(const char *name)
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= name_buffer_ptr;
m_engine_array= engine_array;
- if (!m_file && create_handlers())
+ if (!m_file && create_handlers(mem_root))
{
clear_handler_file();
DBUG_RETURN(TRUE);
@@ -2189,7 +2197,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_mode= mode;
m_open_test_lock= test_if_locked;
m_part_field_array= m_part_info->full_part_field_array;
- if (get_from_handler_file(name))
+ if (get_from_handler_file(name, &table->mem_root))
DBUG_RETURN(1);
m_start_key.length= 0;
m_rec0= table->record[0];
@@ -2226,6 +2234,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
bitmap_set_all(&(m_part_info->used_partitions));
+ /* Recalculate table flags as they may change after open */
+ m_table_flags= m_file[0]->table_flags();
file= m_file;
do
{
@@ -2237,7 +2247,11 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_no_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
set_if_bigger(ref_length, ((*file)->ref_length));
+ m_table_flags&= (*file)->table_flags();
} while (*(++file));
+ m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
+ HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
+ m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
/*
Add 2 bytes for partition id in position ref length.
@@ -2600,6 +2614,7 @@ int ha_partition::write_row(byte * buf)
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
#ifdef NOT_NEEDED
if (likely(buf == rec0))
#endif
@@ -2614,6 +2629,7 @@ int ha_partition::write_row(byte * buf)
set_field_ptr(m_part_field_array, rec0, buf);
}
#endif
+ dbug_tmp_restore_column_map(table->read_set, old_map);
if (unlikely(error))
DBUG_RETURN(error);
m_last_part= part_id;
@@ -4070,7 +4086,7 @@ void ha_partition::include_partition_fields_in_used_fields()
do
{
- ha_set_bit_in_read_set((*ptr)->fieldnr);
+ bitmap_set_bit(table->read_set, (*ptr)->field_index);
} while (*(++ptr));
DBUG_VOID_RETURN;
}
@@ -4158,7 +4174,7 @@ void ha_partition::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
- auto_increment_value= get_auto_increment();
+ stats.auto_increment_value= get_auto_increment();
}
if (flag & HA_STATUS_VARIABLE)
{
@@ -4180,11 +4196,11 @@ void ha_partition::info(uint flag)
check_time: Time of last check (only applicable to MyISAM)
We report last time of all underlying handlers
*/
- records= 0;
- deleted= 0;
- data_file_length= 0;
- index_file_length= 0;
- check_time= 0;
+ stats.records= 0;
+ stats.deleted= 0;
+ stats.data_file_length= 0;
+ stats.index_file_length= 0;
+ stats.check_time= 0;
file_array= m_file;
do
{
@@ -4192,21 +4208,21 @@ void ha_partition::info(uint flag)
{
file= *file_array;
file->info(HA_STATUS_VARIABLE);
- records+= file->records;
- deleted+= file->deleted;
- data_file_length+= file->data_file_length;
- index_file_length+= file->index_file_length;
- if (file->check_time > check_time)
- check_time= file->check_time;
+ stats.records+= file->stats.records;
+ stats.deleted+= file->stats.deleted;
+ stats.data_file_length+= file->stats.data_file_length;
+ stats.index_file_length+= file->stats.index_file_length;
+ if (file->stats.check_time > stats.check_time)
+ stats.check_time= file->stats.check_time;
}
} while (*(++file_array));
- if (records < 2 &&
- m_table_flags & HA_NOT_EXACT_COUNT)
- records= 2;
- if (records > 0)
- mean_rec_length= (ulong) (data_file_length / records);
+ if (stats.records < 2 &&
+ !(m_table_flags & HA_STATS_RECORDS_IS_EXACT))
+ stats.records= 2;
+ if (stats.records > 0)
+ stats.mean_rec_length= (ulong) (stats.data_file_length / stats.records);
else
- mean_rec_length= 1; //? What should we set here
+ stats.mean_rec_length= 1; //? What should we set here
}
if (flag & HA_STATUS_CONST)
{
@@ -4251,7 +4267,6 @@ void ha_partition::info(uint flag)
We ignore it since it is never used
block_size: Block size used
We set it to the value of the first handler
- sortkey: Never used at any place so ignored
ref_length: We set this to the value calculated
and stored in local object
create_time: Creation time of table
@@ -4263,7 +4278,7 @@ void ha_partition::info(uint flag)
file= m_file[0];
file->info(HA_STATUS_CONST);
- create_time= file->create_time;
+ stats.create_time= file->stats.create_time;
ref_length= m_ref_length;
}
if (flag & HA_STATUS_ERRKEY)
@@ -4287,14 +4302,14 @@ void ha_partition::info(uint flag)
Used by SHOW commands
We will report the maximum of these times
*/
- update_time= 0;
+ stats.update_time= 0;
file_array= m_file;
do
{
file= *file_array;
file->info(HA_STATUS_TIME);
- if (file->update_time > update_time)
- update_time= file->update_time;
+ if (file->stats.update_time > stats.update_time)
+ stats.update_time= file->stats.update_time;
} while (*(++file_array));
}
DBUG_VOID_RETURN;
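
Throughout this patch, loose handler members (records, deleted, data_file_length, ...) move into a single stats struct, which makes aggregation across partitions, as in the loops above, mechanical. An illustrative stand-in, with field names mirroring the diff:

#include <ctime>

// Sketch only: ha_statistics_like is a stand-in, not the real struct.
struct ha_statistics_like
{
  unsigned long long records;
  unsigned long long deleted;
  unsigned long long data_file_length;
  unsigned long long index_file_length;
  std::time_t check_time;
  ha_statistics_like()
    : records(0), deleted(0), data_file_length(0),
      index_file_length(0), check_time(0) {}
};

struct PartitionLikeHandler
{
  ha_statistics_like stats;
  void add_child(const ha_statistics_like &child)
  {
    // ha_partition::info(HA_STATUS_VARIABLE) sums its children this way.
    stats.records+= child.records;
    stats.deleted+= child.deleted;
    stats.data_file_length+= child.data_file_length;
    stats.index_file_length+= child.index_file_length;
    if (child.check_time > stats.check_time)
      stats.check_time= child.check_time;
  }
};

int main()
{
  PartitionLikeHandler part;
  ha_statistics_like p1; p1.records= 100; p1.data_file_length= 4096;
  ha_statistics_like p2; p2.records= 50;  p2.check_time= 1234;
  part.add_child(p1);
  part.add_child(p2);
  return part.stats.records == 150 ? 0 : 1;
}
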
@@ -4308,17 +4323,17 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK);
- stat_info->records= file->records;
- stat_info->mean_rec_length= file->mean_rec_length;
- stat_info->data_file_length= file->data_file_length;
- stat_info->max_data_file_length= file->max_data_file_length;
- stat_info->index_file_length= file->index_file_length;
- stat_info->delete_length= file->delete_length;
- stat_info->create_time= file->create_time;
- stat_info->update_time= file->update_time;
- stat_info->check_time= file->check_time;
+ stat_info->records= file->stats.records;
+ stat_info->mean_rec_length= file->stats.mean_rec_length;
+ stat_info->data_file_length= file->stats.data_file_length;
+ stat_info->max_data_file_length= file->stats.max_data_file_length;
+ stat_info->index_file_length= file->stats.index_file_length;
+ stat_info->delete_length= file->stats.delete_length;
+ stat_info->create_time= file->stats.create_time;
+ stat_info->update_time= file->stats.update_time;
+ stat_info->check_time= file->stats.check_time;
stat_info->check_sum= 0;
- if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+ if (file->ha_table_flags() & HA_HAS_CHECKSUM)
stat_info->check_sum= file->checksum();
return;
}
@@ -4389,22 +4404,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
2) Parameters used by some non-MyISAM handlers
----------------------------------------------
- HA_EXTRA_RETRIEVE_ALL_COLS:
- Many handlers have implemented optimisations to avoid fetching all
- fields when retrieving data. In certain situations all fields need
- to be retrieved even though the query_id is not set on all field
- objects.
-
- It is called from copy_data_between_tables where all fields are
- copied without setting query_id before calling the handlers.
- It is called from UPDATE statements when the fields of the index
- used is updated or ORDER BY is used with UPDATE.
- And finally when calculating checksum of a table using the CHECKSUM
- command.
- HA_EXTRA_RETRIEVE_PRIMARY_KEY:
- In some situations it is mandatory to retrieve primary key fields
- independent of the query id's. This extra flag specifies that fetch
- of primary key fields is mandatory.
HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
This is a strictly InnoDB feature that is more or less undocumented.
When it is activated InnoDB copies field by field from its fetch
@@ -4553,7 +4552,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
4) Parameters only used by temporary tables for query processing
----------------------------------------------------------------
HA_EXTRA_RESET_STATE:
- Same as HA_EXTRA_RESET except that buffers are not released. If there is
+ Same as reset() except that buffers are not released. If there is
a READ CACHE it is reinit'ed. A cache is reinit'ed to restart reading
or to change type of cache between READ CACHE and WRITE CACHE.
@@ -4592,8 +4591,9 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
HA_EXTRA_FLUSH_CACHE:
Flush WRITE CACHE in MyISAM. It is only from one place in the code.
This is in sql_insert.cc where it is called if the table_flags doesn't
- contain HA_DUPP_POS. The only handler having the HA_DUPP_POS set is the
- MyISAM handler and so the only handler not receiving this call is MyISAM.
+ contain HA_DUPLICATE_POS. The only handler having the HA_DUPLICATE_POS
+ set is the MyISAM handler and so the only handler not receiving this
+ call is MyISAM.
Thus in effect this call is called but never used. Could be removed
from sql_insert.cc
HA_EXTRA_NO_USER_CHANGE:
@@ -4637,8 +4637,6 @@ int ha_partition::extra(enum ha_extra_function operation)
/* Category 2), used by non-MyISAM handlers */
case HA_EXTRA_IGNORE_DUP_KEY:
case HA_EXTRA_NO_IGNORE_DUP_KEY:
- case HA_EXTRA_RETRIEVE_ALL_COLS:
- case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
{
if (!m_myisam)
@@ -4704,8 +4702,7 @@ int ha_partition::extra(enum ha_extra_function operation)
0 Success
DESCRIPTION
- This will in the future be called instead of extra(HA_EXTRA_RESET) as this
- is such a common call
+ Called at the end of each statement to reset buffers
*/
int ha_partition::reset(void)
@@ -5113,14 +5110,16 @@ void ha_partition::print_error(int error, myf errflag)
/* Should probably look for my own errors first */
/* monty: needs to be called for the last used partition ! */
- DBUG_PRINT("enter", ("error = %d", error));
+ DBUG_PRINT("enter", ("error: %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
{
char buf[100];
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
m_part_info->part_expr->null_value ? "NULL" :
llstr(m_part_info->part_expr->val_int(), buf));
+ dbug_tmp_restore_column_map(table->read_set, old_map);
}
else
m_file[0]->print_error(error, errflag);
@@ -5302,12 +5301,14 @@ void ha_partition::restore_auto_increment()
ulonglong ha_partition::get_auto_increment()
{
- ulonglong auto_inc, max_auto_inc= 0;
+ ulonglong max_auto_inc= 0;
+ handler **pos, **end;
DBUG_ENTER("ha_partition::get_auto_increment");
- for (uint i= 0; i < m_tot_parts; i++)
+ for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
{
- auto_inc= m_file[i]->get_auto_increment();
+ ulonglong auto_inc;
+ auto_inc= (*pos)->get_auto_increment();
set_if_bigger(max_auto_inc, auto_inc);
}
DBUG_RETURN(max_auto_inc);
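
The rewritten get_auto_increment() iterates the handler array with a pointer range instead of an index. The same shape in isolation, with a stand-in Handler type:

#include <algorithm>
#include <cstdio>

// Sketch only: Handler is a stand-in carrying just the value we scan for.
struct Handler { unsigned long long next_auto_inc; };

unsigned long long max_auto_increment(Handler **file, unsigned tot_parts)
{
  unsigned long long max_auto_inc= 0;
  for (Handler **pos= file, **end= file + tot_parts; pos != end; pos++)
    max_auto_inc= std::max(max_auto_inc, (*pos)->next_auto_inc);
  return max_auto_inc;
}

int main()
{
  Handler a= {10}, b= {42}, c= {7};
  Handler *parts[]= { &a, &b, &c };
  std::printf("%llu\n", max_auto_increment(parts, 3)); // prints 42
  return 0;
}
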
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index b31b9af28a3..13a1e5cf6e6 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -95,7 +95,6 @@ private:
uint m_rec_length; // Local copy of record length
bool m_ordered; // Ordered/Unordered index scan
- bool m_has_transactions; // Can we support transactions
bool m_pkey_is_clustered; // Is primary key clustered
bool m_create_handler; // Handler used to create table
bool m_is_sub_partitioned; // Is subpartitioned
@@ -157,7 +156,7 @@ public:
enable later calls of the methods to retrieve constants from the under-
lying handlers. Returns false if not successful.
*/
- int ha_initialise();
+ bool initialise_partition(MEM_ROOT *mem_root);
/*
-------------------------------------------------------------------------
@@ -208,25 +207,24 @@ private:
delete_table, rename_table and create uses very similar logic which
is packed into this routine.
*/
- uint del_ren_cre_table(const char *from,
- const char *to= NULL,
- TABLE *table_arg= NULL,
- HA_CREATE_INFO *create_info= NULL);
+ uint del_ren_cre_table(const char *from, const char *to,
+ TABLE *table_arg, HA_CREATE_INFO *create_info);
/*
One method to create the table_name.par file containing the names of the
underlying partitions, their engine and the number of partitions.
And one method to read it in.
*/
bool create_handler_file(const char *name);
- bool get_from_handler_file(const char *name);
- bool new_handlers_from_part_info();
- bool create_handlers();
+ bool get_from_handler_file(const char *name, MEM_ROOT *mem_root);
+ bool new_handlers_from_part_info(MEM_ROOT *mem_root);
+ bool create_handlers(MEM_ROOT *mem_root);
void clear_handler_file();
void set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id);
partition_element *find_partition_element(uint part_id);
+
public:
/*
@@ -588,7 +586,7 @@ public:
NULLable field.
(BDB, HEAP, MyISAM, NDB, InnoDB)
- HA_DUPP_POS:
+ HA_DUPLICATE_POS:
Tells that the position of the conflicting duplicate-key
record is stored in table->file->dupp_ref. (insert uses rnd_pos() on
this to find the duplicated row)
@@ -609,11 +607,10 @@ public:
with hidden primary key)
(No handler has this limitation currently)
- HA_NOT_EXACT_COUNT:
+ HA_STATS_RECORDS_IS_EXACT:
Does the counter of records after the info call specify an exact
- value or not. If it doesn't this flag is set.
+ value or not. If it does, this flag is set.
Only MyISAM and HEAP uses exact count.
- (MyISAM, HEAP, BDB, InnoDB, NDB, Federated)
HA_CAN_INSERT_DELAYED:
Can the storage engine support delayed inserts.
@@ -676,7 +673,7 @@ public:
index scan module.
(NDB)
*/
- virtual ulong table_flags() const
+ virtual ulonglong table_flags() const
{ return m_table_flags; }
/*
@@ -771,13 +768,6 @@ public:
virtual uint min_record_length(uint options) const;
/*
- Transactions on the table is supported if all handlers below support
- transactions.
- */
- virtual bool has_transactions()
- { return m_has_transactions; }
-
- /*
Primary key is clustered can only be true if all underlying handlers have
this feature.
*/
diff --git a/sql/handler.cc b/sql/handler.cc
index b9ef05a33c2..927a8eb8ed5 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -44,18 +44,19 @@
#include "ha_innodb.h"
#endif
-/* While we have legacy_db_type, we have this array to
- check for dups and to find handlerton from legacy_db_type.
- Remove when legacy_db_type is finally gone */
-static handlerton *installed_htons[128];
+/*
+ While we have legacy_db_type, we have this array to
+ check for dups and to find handlerton from legacy_db_type.
+ Remove when legacy_db_type is finally gone
+*/
-#define BITMAP_STACKBUF_SIZE (128/8)
+static handlerton *installed_htons[128];
KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} };
/* static functions defined in this file */
-static handler *create_default(TABLE_SHARE *table);
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root);
const handlerton default_hton =
{
@@ -146,8 +147,7 @@ handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name)
const char *ha_get_storage_engine(enum legacy_db_type db_type)
{
- switch (db_type)
- {
+ switch (db_type) {
case DB_TYPE_DEFAULT:
return "DEFAULT";
case DB_TYPE_UNKNOWN:
@@ -161,18 +161,17 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
}
-static handler *create_default(TABLE_SHARE *table)
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- handlerton *hton=ha_resolve_by_legacy_type(current_thd, DB_TYPE_DEFAULT);
+ handlerton *hton= ha_resolve_by_legacy_type(current_thd, DB_TYPE_DEFAULT);
return (hton && hton != &default_hton && hton->create) ?
- hton->create(table) : NULL;
+ hton->create(table, mem_root) : NULL;
}
handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
{
- switch (db_type)
- {
+ switch (db_type) {
case DB_TYPE_DEFAULT:
return (thd->variables.table_type != NULL) ?
thd->variables.table_type :
@@ -225,36 +224,23 @@ handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
handlerton *db_type)
{
- handler *file= NULL;
- /*
- handlers are allocated with new in the handlerton create() function
- we need to set the thd mem_root for these to be allocated correctly
- */
- THD *thd= current_thd;
- MEM_ROOT *thd_save_mem_root= thd->mem_root;
- thd->mem_root= alloc;
-
- if (db_type != NULL && db_type->state == SHOW_OPTION_YES && db_type->create)
- file= db_type->create(share);
-
- thd->mem_root= thd_save_mem_root;
+ handler *file;
+ DBUG_ENTER("get_new_handler");
+ DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc));
- if (!file)
+ if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
{
- handlerton *def= current_thd->variables.table_type;
- /* Try first with 'default table type' */
- if (db_type != def)
- return get_new_handler(share, alloc, def);
+ if ((file= db_type->create(share, alloc)))
+ file->init();
+ DBUG_RETURN(file);
}
- if (file)
- {
- if (file->ha_initialise())
- {
- delete file;
- file=0;
- }
- }
- return file;
+ /*
+ Try the default table type.
+ The use of current_thd here is ok, as this function is called very
+ often but this branch is entered very seldom.
+ */
+ DBUG_RETURN(get_new_handler(share, alloc,
+ current_thd->variables.table_type));
}
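
get_new_handler() now asks the requested engine's factory first and falls back to the session default only on the rare path. A compact sketch of that fallback, with simplified stand-ins for handlerton and handler (the guard against endless recursion is an addition for the sketch):

// Sketch only: Handlerton and Handler are simplified stand-ins.
struct Handler
{
  virtual ~Handler() {}
  virtual void init() {}                // cache table_flags() etc. up front
};

struct Handlerton
{
  bool available;
  Handler *(*create)();
};

Handler *get_new_handler(Handlerton *db_type, Handlerton *default_type)
{
  if (db_type && db_type->available && db_type->create)
  {
    Handler *file= db_type->create();
    if (file)
      file->init();
    return file;                        // may be null, as in the diff
  }
  if (db_type == default_type)
    return 0;                           // default engine unusable: give up
  // Rarely taken: retry with the session default engine.
  return get_new_handler(default_type, default_type);
}

static Handler *make_handler() { static Handler h; return &h; }

int main()
{
  Handlerton ndb= { false, 0 };             // unavailable engine
  Handlerton def= { true, make_handler };   // default engine works
  return get_new_handler(&ndb, &def) ? 0 : 1;
}
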
@@ -265,11 +251,13 @@ handler *get_ha_partition(partition_info *part_info)
DBUG_ENTER("get_ha_partition");
if ((partition= new ha_partition(part_info)))
{
- if (partition->ha_initialise())
+ if (partition->initialise_partition(current_thd->mem_root))
{
delete partition;
partition= 0;
}
+ else
+ partition->init();
}
else
{
@@ -1379,7 +1367,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
! (file=get_new_handler(&dummy_share, thd->mem_root, table_type)))
DBUG_RETURN(ENOENT);
- if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED))
+ if (lower_case_table_names == 2 && !(file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that table handler get path in lower case */
strmov(tmp_path, path);
@@ -1462,6 +1450,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
table= table_arg;
DBUG_ASSERT(table->s == table_share);
+ DBUG_ASSERT(alloc_root_inited(&table->mem_root));
if ((error=open(name,mode,test_if_locked)))
{
@@ -1483,106 +1472,23 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
table->db_stat|=HA_READ_ONLY;
(void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
- DBUG_ASSERT(alloc_root_inited(&table->mem_root));
-
if (!(ref= (byte*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length)*2)))
{
close();
error=HA_ERR_OUT_OF_MEM;
}
else
- dupp_ref=ref+ALIGN_SIZE(ref_length);
-
- if (ha_allocate_read_write_set(table->s->fields))
- error= 1;
+ dup_ref=ref+ALIGN_SIZE(ref_length);
+ cached_table_flags= table_flags();
}
DBUG_RETURN(error);
}
-int handler::ha_initialise()
-{
- DBUG_ENTER("ha_initialise");
- DBUG_RETURN(FALSE);
-}
-
-
-/*
- Initalize bit maps for used fields
-
- Called from open_table_from_share()
-*/
-
-int handler::ha_allocate_read_write_set(ulong no_fields)
-{
- uint bitmap_size= bitmap_buffer_size(no_fields+1);
- uint32 *read_buf, *write_buf;
- DBUG_ENTER("ha_allocate_read_write_set");
- DBUG_PRINT("enter", ("no_fields = %d", no_fields));
-
- if (!multi_alloc_root(&table->mem_root,
- &read_set, sizeof(MY_BITMAP),
- &write_set, sizeof(MY_BITMAP),
- &read_buf, bitmap_size,
- &write_buf, bitmap_size,
- NullS))
- {
- DBUG_RETURN(TRUE);
- }
- bitmap_init(read_set, read_buf, no_fields+1, FALSE);
- bitmap_init(write_set, write_buf, no_fields+1, FALSE);
- table->read_set= read_set;
- table->write_set= write_set;
- ha_clear_all_set();
- DBUG_RETURN(FALSE);
-}
-
-void handler::ha_clear_all_set()
-{
- DBUG_ENTER("ha_clear_all_set");
- bitmap_clear_all(read_set);
- bitmap_clear_all(write_set);
- bitmap_set_bit(read_set, 0);
- bitmap_set_bit(write_set, 0);
- DBUG_VOID_RETURN;
-}
-
-int handler::ha_retrieve_all_cols()
-{
- DBUG_ENTER("handler::ha_retrieve_all_cols");
- bitmap_set_all(read_set);
- DBUG_RETURN(0);
-}
-
-int handler::ha_retrieve_all_pk()
-{
- DBUG_ENTER("ha_retrieve_all_pk");
- ha_set_primary_key_in_read_set();
- DBUG_RETURN(0);
-}
-
-void handler::ha_set_primary_key_in_read_set()
-{
- ulong prim_key= table->s->primary_key;
- DBUG_ENTER("handler::ha_set_primary_key_in_read_set");
- DBUG_PRINT("info", ("Primary key = %d", prim_key));
- if (prim_key != MAX_KEY)
- {
- KEY_PART_INFO *key_part= table->key_info[prim_key].key_part;
- KEY_PART_INFO *key_part_end= key_part +
- table->key_info[prim_key].key_parts;
- for (;key_part != key_part_end; ++key_part)
- ha_set_bit_in_read_set(key_part->fieldnr);
- }
- DBUG_VOID_RETURN;
-}
-
-
-
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
- has the HA_NOT_EXACT_COUNT set.
+ have the HA_STATS_RECORDS_IS_EXACT flag set.
*/
int handler::read_first_row(byte * buf, uint primary_key)
@@ -1598,7 +1504,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
scanning the table.
TODO remove the test for HA_READ_ORDER
*/
- if (deleted < 10 || primary_key >= MAX_KEY ||
+ if (stats.deleted < 10 || primary_key >= MAX_KEY ||
!(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
{
(void) ha_rnd_init(1);
@@ -1786,12 +1692,38 @@ void handler::restore_auto_increment()
}
+/*
+ MySQL signals that it has changed the column bitmaps
+
+ USAGE
+ This is for handlers that need to set up their own column bitmaps.
+ Normally the handler should set up its column bitmaps in
+ index_init() or rnd_init() and in any column_bitmaps_signal() call after
+ this.
+
+ The handler is allowed to change the bitmaps after an index_init() or
+ rnd_init() call is made, as after this MySQL will not use the bitmaps
+ for any program logic checks.
+*/
+
+void handler::column_bitmaps_signal()
+{
+ DBUG_ENTER("column_bitmaps_signal");
+ DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", table->read_set,
+ table->write_set));
+ DBUG_VOID_RETURN;
+}
+
+
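
A handler that caches engine-side column information can override the new column_bitmaps_signal() hook to refresh that cache whenever the server changes the maps. A hedged sketch with illustrative names only:

#include <bitset>
#include <cstdio>

// Sketch only: all names are illustrative, not the real handler API.
struct TableCtx { std::bitset<32> read_set; };

struct BaseHandler
{
  TableCtx *table;
  BaseHandler() : table(0) {}
  virtual ~BaseHandler() {}
  virtual void column_bitmaps_signal() {}   // default: ignore the signal
};

struct ColumnCachingHandler : BaseHandler
{
  std::bitset<32> fetch_mask;               // engine-side copy of read_set
  void column_bitmaps_signal()
  {
    fetch_mask= table->read_set;            // refresh the cached mask
    std::printf("fetching %d columns\n", (int) fetch_mask.count());
  }
};

int main()
{
  TableCtx t;
  t.read_set.set(0);
  t.read_set.set(5);
  ColumnCachingHandler h;
  h.table= &t;
  h.column_bitmaps_signal();   // the server announces a bitmap change
  return 0;
}
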
ulonglong handler::get_auto_increment()
{
ulonglong nr;
int error;
(void) extra(HA_EXTRA_KEYREAD);
+ table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
+ table->read_set);
+ column_bitmaps_signal();
index_init(table->s->next_number_index, 1);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
@@ -1857,18 +1789,20 @@ void handler::print_error(int error, myf errflag)
uint key_nr=get_dup_key(error);
if ((int) key_nr >= 0)
{
+ uint max_length;
/* Write the duplicated key in the error message */
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
/* Table is opened and defined at this point */
key_unpack(&str,table,(uint) key_nr);
- uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY));
+ max_length= MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY));
if (str.length() >= max_length)
{
str.length(max_length-4);
str.append(STRING_WITH_LEN("..."));
}
- my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(), table->key_info[key_nr].name);
+ my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(),
+ table->key_info[key_nr].name);
DBUG_VOID_RETURN;
}
textno=ER_DUP_KEY;
@@ -1879,12 +1813,14 @@ void handler::print_error(int error, myf errflag)
uint key_nr= get_dup_key(error);
if ((int) key_nr >= 0)
{
+ uint max_length;
/* Write the key in the error message */
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
/* Table is opened and defined at this point */
key_unpack(&str,table,(uint) key_nr);
- uint max_length= MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY));
+ max_length= (MYSQL_ERRMSG_SIZE-
+ (uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY)));
if (str.length() >= max_length)
{
str.length(max_length-4);
@@ -2293,22 +2229,23 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}
-void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id)
+void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id)
{
info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK);
- stat_info->records= records;
- stat_info->mean_rec_length= mean_rec_length;
- stat_info->data_file_length= data_file_length;
- stat_info->max_data_file_length= max_data_file_length;
- stat_info->index_file_length= index_file_length;
- stat_info->delete_length= delete_length;
- stat_info->create_time= create_time;
- stat_info->update_time= update_time;
- stat_info->check_time= check_time;
- stat_info->check_sum= 0;
+ stat_info->records= stats.records;
+ stat_info->mean_rec_length= stats.mean_rec_length;
+ stat_info->data_file_length= stats.data_file_length;
+ stat_info->max_data_file_length= stats.max_data_file_length;
+ stat_info->index_file_length= stats.index_file_length;
+ stat_info->delete_length= stats.delete_length;
+ stat_info->create_time= stats.create_time;
+ stat_info->update_time= stats.update_time;
+ stat_info->check_time= stats.check_time;
+ stat_info->check_sum= 0;
if (table_flags() & (ulong) HA_HAS_CHECKSUM)
- stat_info->check_sum= checksum();
+ stat_info->check_sum= checksum();
return;
}
@@ -2352,7 +2289,7 @@ int ha_create_table(THD *thd, const char *path,
name= share.path.str;
if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
+ !(table.file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that handler gets name in lower case */
strmov(name_buff, name);
@@ -2431,7 +2368,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
+ !(table.file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that handler gets name in lower case */
my_casedn_str(files_charset_info, path);
@@ -2781,6 +2718,9 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
multi_range_sorted= sorted;
multi_range_buffer= buffer;
+ table->mark_columns_used_by_index_no_reset(active_index, table->read_set);
+ table->column_bitmaps_set(table->read_set, table->write_set);
+
for (multi_range_curr= ranges, multi_range_end= ranges + range_count;
multi_range_curr < multi_range_end;
multi_range_curr++)
@@ -3023,7 +2963,7 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin,
handlerton *hton= (handlerton *) plugin->plugin->info;
handler *file;
if (hton->state == SHOW_OPTION_YES && hton->create &&
- (file= hton->create((TABLE_SHARE*) 0)))
+ (file= hton->create((TABLE_SHARE*) 0, current_thd->mem_root)))
{
List_iterator_fast<char> it(*found_exts);
const char **ext, *old_ext;
@@ -3160,7 +3100,7 @@ namespace {
char const *name;
};
- int table_name_compare(void const *a, void const *b)
+ static int table_name_compare(void const *a, void const *b)
{
st_table_data const *x = (st_table_data const*) a;
st_table_data const *y = (st_table_data const*) b;
@@ -3170,7 +3110,7 @@ namespace {
return res != 0 ? res : strcmp(x->name, y->name);
}
- bool check_table_binlog_row_based(THD *thd, TABLE *table)
+ static bool check_table_binlog_row_based(THD *thd, TABLE *table)
{
static st_table_data const ignore[] = {
{ "mysql", "event" },
@@ -3191,44 +3131,29 @@ namespace {
DBUG_ASSERT(table->s->cached_row_logging_check == 0 ||
table->s->cached_row_logging_check == 1);
- return
- thd->current_stmt_binlog_row_based &&
- thd && (thd->options & OPTION_BIN_LOG) &&
- mysql_bin_log.is_open() &&
- table->s->cached_row_logging_check;
+ return (thd->current_stmt_binlog_row_based &&
+ (thd->options & OPTION_BIN_LOG) &&
+ mysql_bin_log.is_open() &&
+ table->s->cached_row_logging_check);
}
}
-template<class RowsEventT> int binlog_log_row(TABLE* table,
+template<class RowsEventT> int binlog_log_row(TABLE *table,
const byte *before_record,
const byte *after_record)
{
if (table->file->is_injective())
return 0;
bool error= 0;
- THD *const thd= current_thd;
-
- if (check_table_binlog_row_based(thd, table))
- {
- MY_BITMAP cols;
- /* Potential buffer on the stack for the bitmap */
- uint32 bitbuf[BITMAP_STACKBUF_SIZE/sizeof(uint32)];
- uint n_fields= table->s->fields;
- my_bool use_bitbuf= n_fields <= sizeof(bitbuf)*8;
- if (likely(!(error= bitmap_init(&cols,
- use_bitbuf ? bitbuf : NULL,
- (n_fields + 7) & ~7UL,
- false))))
- {
- bitmap_set_all(&cols);
- error=
- RowsEventT::binlog_row_logging_function(thd, table,
- table->file->has_transactions(),
- &cols, table->s->fields,
- before_record, after_record);
- if (!use_bitbuf)
- bitmap_free(&cols);
- }
+
+ if (check_table_binlog_row_based(table->in_use, table))
+ {
+ error=
+ RowsEventT::binlog_row_logging_function(table->in_use, table,
+ table->file->has_transactions(),
+ &table->s->all_set,
+ table->s->fields,
+ before_record, after_record);
}
return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
}
@@ -3289,6 +3214,28 @@ int handler::ha_external_lock(THD *thd, int lock_type)
DBUG_RETURN(0);
}
+
+/*
+ Check handler usage and reset state of file to after 'open'
+*/
+
+int handler::ha_reset()
+{
+ DBUG_ENTER("ha_reset");
+ /* Check that we have called all proper deallocation functions */
+ DBUG_ASSERT((byte*) table->def_read_set.bitmap +
+ table->s->column_bitmap_size ==
+ (char*) table->def_write_set.bitmap);
+ DBUG_ASSERT(bitmap_is_set_all(&table->s->all_set));
+ DBUG_ASSERT(table->key_read == 0);
+ /* ensure that ha_index_end / ha_rnd_end has been called */
+ DBUG_ASSERT(inited == NONE);
+ /* Free cache used by filesort */
+ free_io_cache(table);
+ DBUG_RETURN(reset());
+}
+
+
int handler::ha_write_row(byte *buf)
{
int error;
@@ -3331,3 +3278,16 @@ int handler::ha_delete_row(const byte *buf)
#endif
return 0;
}
+
+
+/*
+ use_hidden_primary_key() is called in case of an update/delete when
+ (table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is set
+ but we don't have a primary key
+*/
+
+void handler::use_hidden_primary_key()
+{
+ /* fallback to use all columns in the table to identify row */
+ table->use_all_columns();
+}
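[Illustration, not part of the patch: the caller lives in the table layer and is outside this hunk, but the guard is expected to look roughly like the sketch below; primary_key == MAX_KEY marks "no primary key".]

    if ((file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) &&
        s->primary_key == MAX_KEY)
      file->use_hidden_primary_key();   /* falls back to use_all_columns() */
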
diff --git a/sql/handler.h b/sql/handler.h
index d988e46b236..04b0086d58d 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -49,14 +49,18 @@
/* Bits in table_flags() to show what database can do */
+#define HA_NO_TRANSACTIONS (1 << 0) /* Doesn't support transactions */
+#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */
+#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
/*
- Can switch index during the scan with ::rnd_same() - not used yet.
- see mi_rsame/heap_rsame/myrg_rsame
+ The following flag should be set if the following is not true when scanning
+ a table with rnd_next():
+ - We will see all rows (including deleted ones)
+ - Row positions are 'table->s->db_record_offset' apart
+ If this flag is set, filesort will do a position() call for each matched
+ row to be able to find the row later.
*/
-#define HA_READ_RND_SAME (1 << 0)
-#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
-#define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber;
- It returns a position to ha_r_rnd */
+#define HA_REC_NOT_IN_SEQ (1 << 3)
#define HA_CAN_GEOMETRY (1 << 4)
/*
Reading keys in random order is as fast as reading keys in sort order
@@ -64,28 +68,41 @@
filesort to decide if we should sort key + data or key + pointer-to-row
*/
#define HA_FAST_KEY_READ (1 << 5)
+/*
+ Set the following flag if, on delete, we should force all key columns to be
+ read, and on update, read all key columns that change
+*/
+#define HA_REQUIRES_KEY_COLUMNS_FOR_DELETE (1 << 6)
#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
-#define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */
+#define HA_DUPLICATE_POS (1 << 8) /* ha_position() gives dup row */
#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
#define HA_CAN_INDEX_BLOBS (1 << 10)
#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
-#define HA_NOT_EXACT_COUNT (1 << 13)
+#define HA_STATS_RECORDS_IS_EXACT (1 << 13) /* stats.records is exact */
/*
INSERT_DELAYED only works with handlers that uses MySQL internal table
level locks
*/
#define HA_CAN_INSERT_DELAYED (1 << 14)
+/*
+ Set if we get the primary key columns for free when we do an index read.
+ It also implies that we have to retrieve the primary key when using
+ position() and rnd_pos().
+*/
#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
/*
- If HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS is set, it means that the engine can
- do this: the position of an arbitrary record can be retrieved using
- position() when the table has a primary key, effectively allowing random
- access on the table based on a given record.
+ If HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is set, it means that position()
+ uses a primary key. Without a primary key, we can't call position().
*/
-#define HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS (1 << 16)
+#define HA_PRIMARY_KEY_REQUIRED_FOR_POSITION (1 << 16)
#define HA_CAN_RTREEKEYS (1 << 17)
#define HA_NOT_DELETE_WITH_CACHE (1 << 18)
+/*
+ The following is set if we need a primary key to delete (and update) a row.
+ If there is no primary key, all columns need to be read on update and delete
+*/
+#define HA_PRIMARY_KEY_REQUIRED_FOR_DELETE (1 << 19)
#define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
#define HA_CAN_FULLTEXT (1 << 21)
#define HA_CAN_SQL_HANDLER (1 << 22)
@@ -97,7 +114,8 @@
#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
-#define HA_NO_COPY_ON_ALTER (1 << 31)
+#define HA_NO_COPY_ON_ALTER (LL(1) << 31)
+#define HA_HAS_RECORDS (LL(1) << 32) /* records() gives exact count */
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -240,7 +258,7 @@ enum legacy_db_type
enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
- ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
+ ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGES };
enum enum_binlog_func {
BFN_RESET_LOGS= 1,
@@ -563,7 +581,7 @@ struct handlerton
void *(*create_cursor_read_view)();
void (*set_cursor_read_view)(void *);
void (*close_cursor_read_view)(void *);
- handler *(*create)(TABLE_SHARE *table);
+ handler *(*create)(TABLE_SHARE *table, MEM_ROOT *mem_root);
void (*drop_database)(char* path);
int (*panic)(enum ha_panic_function flag);
int (*start_consistent_snapshot)(THD *thd);
@@ -746,11 +764,37 @@ typedef struct st_handler_buffer
typedef struct system_status_var SSV;
+class ha_statistics
+{
+public:
+ ulonglong data_file_length; /* Length of data file */
+ ulonglong max_data_file_length; /* Max length of data file */
+ ulonglong index_file_length;
+ ulonglong max_index_file_length;
+ ulonglong delete_length; /* Free bytes */
+ ulonglong auto_increment_value;
+ ha_rows records; /* Estimated records in table */
+ ha_rows deleted; /* Deleted records */
+ ulong mean_rec_length; /* physical reclength */
+ time_t create_time; /* When table was created */
+ time_t check_time;
+ time_t update_time;
+ uint block_size; /* index block size */
+
+ ha_statistics():
+ data_file_length(0), max_data_file_length(0),
+ index_file_length(0), max_index_file_length(0),
+ delete_length(0), auto_increment_value(0),
+ records(0), deleted(0), mean_rec_length(0), create_time(0),
+ check_time(0), update_time(0), block_size(0)
+ {}
+};
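[Illustration, not part of the patch: engines are expected to fill this struct from info() instead of the old per-handler members; ha_example and its engine_state member are hypothetical.]

    int ha_example::info(uint flag)
    {
      if (flag & HA_STATUS_VARIABLE)
      {
        stats.records=          engine_state.rows;
        stats.deleted=          engine_state.deleted_rows;
        stats.mean_rec_length=  engine_state.mean_reclength;
        stats.data_file_length= engine_state.data_length;
      }
      if (flag & HA_STATUS_TIME)
        stats.update_time= engine_state.update_time;
      return 0;
    }
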
+
/*
The handler class is the interface for dynamically loadable
storage engines. Do not add ifdefs and take care when adding or
changing virtual functions to avoid vtable confusion
*/
+
class handler :public Sql_alloc
{
friend class ha_partition;
@@ -758,6 +802,7 @@ class handler :public Sql_alloc
protected:
struct st_table_share *table_share; /* The table definition */
struct st_table *table; /* The current open table */
+ ulonglong cached_table_flags; /* Set on init() and open() */
virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
@@ -770,28 +815,15 @@ class handler :public Sql_alloc
*/
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
-
+ virtual ulonglong table_flags(void) const =0;
void ha_statistic_increment(ulong SSV::*offset) const;
-
-private:
- virtual int reset() { return extra(HA_EXTRA_RESET); }
public:
const handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
- byte *dupp_ref; /* Pointer to dupp row */
- ulonglong data_file_length; /* Length off data file */
- ulonglong max_data_file_length; /* Length off data file */
- ulonglong index_file_length;
- ulonglong max_index_file_length;
- ulonglong delete_length; /* Free bytes */
- ulonglong auto_increment_value;
- ha_rows records; /* Records in table */
- ha_rows deleted; /* Deleted records */
- ulong mean_rec_length; /* physical reclength */
- time_t create_time; /* When table was created */
- time_t check_time;
- time_t update_time;
+ byte *dup_ref; /* Pointer to duplicate row */
+
+ ha_statistics stats;
/* The following are for read_multi_range */
bool multi_range_sorted;
@@ -806,27 +838,20 @@ public:
bool eq_range;
uint errkey; /* Last dup key */
- uint sortkey, key_used_on_scan;
+ uint key_used_on_scan;
uint active_index;
/* Length of ref (1-8 or the clustered key length) */
uint ref_length;
- uint block_size; /* index block size */
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
bool auto_increment_column_changed;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
- MY_BITMAP *read_set;
- MY_BITMAP *write_set;
handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), ht(ht_arg),
- ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
- delete_length(0), auto_increment_value(0),
- records(0), deleted(0), mean_rec_length(0),
- create_time(0), check_time(0), update_time(0),
- key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
- ref_length(sizeof(my_off_t)), block_size(0),
+ ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
+ ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE), implicit_emptied(0),
pushed_cond(NULL)
{}
@@ -834,6 +859,11 @@ public:
{
/* TODO: DBUG_ASSERT(inited == NONE); */
}
+ /* This is called after create to allow us to set up cached variables */
+ void init()
+ {
+ cached_table_flags= table_flags();
+ }
/*
Check whether a handler allows to lock the table.
@@ -861,7 +891,6 @@ public:
{
return TRUE;
}
- virtual int ha_initialise();
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
@@ -873,21 +902,27 @@ public:
table_share= share;
}
virtual double scan_time()
- { return ulonglong2double(data_file_length) / IO_SIZE + 2; }
+ { return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return rows2double(ranges+rows); }
+ { return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
- virtual bool has_transactions(){ return 0;}
+ bool has_transactions()
+ { return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
virtual uint extra_rec_buf_length() const { return 0; }
/*
+ Number of rows in table. It will only be called if
+ (table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
+ */
+ virtual ha_rows records() { return stats.records; }
+ /*
Return upper bound of current number of records in the table
(max. of how many records one will retrieve when doing a full table scan)
If upper bound is not known, HA_POS_ERROR should be returned as a max
possible upper bound.
*/
virtual ha_rows estimate_rows_upper_bound()
- { return records+EXTRA_RECORDS; }
+ { return stats.records+EXTRA_RECORDS; }
/*
Get the row type from the storage engine. If this method returns
@@ -925,139 +960,23 @@ public:
inited=NONE;
DBUG_RETURN(rnd_end());
}
- int ha_reset()
- {
- DBUG_ENTER("ha_reset");
- ha_clear_all_set();
- DBUG_RETURN(reset());
- }
+ int ha_reset();
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
+ longlong ha_table_flags() { return cached_table_flags; }
+
/*
- These are a set of routines used to enable handlers to only read/write
- partial lists of the fields in the table. The bit vector is maintained
- by the server part and is used by the handler at calls to read/write
- data in the table.
- It replaces the use of query id's for this purpose. The benefit is that
- the handler can also set bits in the read/write set if it has special
- needs and it is also easy for other parts of the server to interact
- with the handler (e.g. the replication part for row-level logging).
- The routines are all part of the general handler and are not possible
- to override by a handler. A handler can however set/reset bits by
- calling these routines.
-
- The methods ha_retrieve_all_cols and ha_retrieve_all_pk are made
- virtual to handle InnoDB specifics. If InnoDB doesn't need the
- extra parameters HA_EXTRA_RETRIEVE_ALL_COLS and
- HA_EXTRA_RETRIEVE_PRIMARY_KEY anymore then these methods need not be
- virtual anymore.
+ Signal that the table->read_set and table->write_set table maps changed.
+ The handler is allowed to set additional bits in these maps in this
+ call. Normally the handler should ignore all calls until we have done
+ a ha_rnd_init() or ha_index_init(), write_row(), update_row() or
+ delete_row(), as there may be several calls to this routine.
*/
- virtual int ha_retrieve_all_cols();
- virtual int ha_retrieve_all_pk();
- void ha_set_all_bits_in_read_set()
- {
- DBUG_ENTER("ha_set_all_bits_in_read_set");
- bitmap_set_all(read_set);
- DBUG_VOID_RETURN;
- }
- void ha_set_all_bits_in_write_set()
- {
- DBUG_ENTER("ha_set_all_bits_in_write_set");
- bitmap_set_all(write_set);
- DBUG_VOID_RETURN;
- }
- void ha_set_bit_in_read_set(uint fieldnr)
- {
- DBUG_ENTER("ha_set_bit_in_read_set");
- DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
- bitmap_set_bit(read_set, fieldnr);
- DBUG_VOID_RETURN;
- }
- void ha_clear_bit_in_read_set(uint fieldnr)
- {
- DBUG_ENTER("ha_clear_bit_in_read_set");
- DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
- bitmap_clear_bit(read_set, fieldnr);
- DBUG_VOID_RETURN;
- }
- void ha_set_bit_in_write_set(uint fieldnr)
- {
- DBUG_ENTER("ha_set_bit_in_write_set");
- DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
- bitmap_set_bit(write_set, fieldnr);
- DBUG_VOID_RETURN;
- }
- void ha_clear_bit_in_write_set(uint fieldnr)
- {
- DBUG_ENTER("ha_clear_bit_in_write_set");
- DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
- bitmap_clear_bit(write_set, fieldnr);
- DBUG_VOID_RETURN;
- }
- void ha_set_bit_in_rw_set(uint fieldnr, bool write_op)
- {
- DBUG_ENTER("ha_set_bit_in_rw_set");
- DBUG_PRINT("info", ("Set bit %u in read set", fieldnr));
- bitmap_set_bit(read_set, fieldnr);
- if (!write_op) {
- DBUG_VOID_RETURN;
- }
- else
- {
- DBUG_PRINT("info", ("Set bit %u in read and write set", fieldnr));
- bitmap_set_bit(write_set, fieldnr);
- }
- DBUG_VOID_RETURN;
- }
- bool ha_get_bit_in_read_set(uint fieldnr)
- {
- bool bit_set=bitmap_is_set(read_set,fieldnr);
- DBUG_ENTER("ha_get_bit_in_read_set");
- DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
- DBUG_RETURN(bit_set);
- }
- bool ha_get_bit_in_write_set(uint fieldnr)
- {
- bool bit_set=bitmap_is_set(write_set,fieldnr);
- DBUG_ENTER("ha_get_bit_in_write_set");
- DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
- DBUG_RETURN(bit_set);
- }
- bool ha_get_all_bit_in_read_set()
- {
- bool all_bits_set= bitmap_is_set_all(read_set);
- DBUG_ENTER("ha_get_all_bit_in_read_set");
- DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
- DBUG_RETURN(all_bits_set);
- }
- bool ha_get_all_bit_in_read_clear()
- {
- bool all_bits_set= bitmap_is_clear_all(read_set);
- DBUG_ENTER("ha_get_all_bit_in_read_clear");
- DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
- DBUG_RETURN(all_bits_set);
- }
- bool ha_get_all_bit_in_write_set()
- {
- bool all_bits_set= bitmap_is_set_all(write_set);
- DBUG_ENTER("ha_get_all_bit_in_write_set");
- DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
- DBUG_RETURN(all_bits_set);
- }
- bool ha_get_all_bit_in_write_clear()
- {
- bool all_bits_set= bitmap_is_clear_all(write_set);
- DBUG_ENTER("ha_get_all_bit_in_write_clear");
- DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
- DBUG_RETURN(all_bits_set);
- }
- void ha_set_primary_key_in_read_set();
- int ha_allocate_read_write_set(ulong no_fields);
- void ha_clear_all_set();
+ virtual void column_bitmaps_signal();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int close(void)=0;
@@ -1210,6 +1129,13 @@ public:
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{ return extra(operation); }
+
+ /*
+ Reset state of file to after 'open'
+ This function is called after every statement for all tables used
+ by that statement.
+ */
+ virtual int reset() { return 0; }
/*
In an UPDATE or DELETE, if the row under the cursor was locked by another
transaction, and the engine used an optimistic read of the last
@@ -1339,7 +1265,6 @@ public:
/* The following can be called without an open handler */
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
- virtual ulong table_flags(void) const =0;
virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
virtual void set_auto_partitions(partition_info *part_info) { return; }
@@ -1448,7 +1373,6 @@ public:
false otherwise
*/
virtual bool primary_key_is_clustered() { return FALSE; }
-
virtual int cmp_ref(const byte *ref1, const byte *ref2)
{
return memcmp(ref1, ref2, ref_length);
@@ -1464,10 +1388,12 @@ public:
cond_push()
cond Condition to be pushed. The condition tree must not be
modified by the by the caller.
+
RETURN
The 'remainder' condition that caller must use to filter out records.
NULL means the handler will not return rows that do not match the
passed condition.
+
NOTES
The pushed conditions form a stack (from which one can remove the
last pushed condition using cond_pop).
@@ -1475,7 +1401,7 @@ public:
AND ... AND pushed_condN)
or less restrictive condition, depending on handler's capabilities.
- handler->extra(HA_EXTRA_RESET) call empties the condition stack.
+ handler->ha_reset() call empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
condition stack.
*/
@@ -1491,18 +1417,7 @@ public:
uint table_changes)
{ return COMPATIBLE_DATA_NO; }
-private:
- /*
- Row-level primitives for storage engines. These should be
- overridden by the storage engine class. To call these methods, use
- the corresponding 'ha_*' method above.
- */
- virtual int external_lock(THD *thd __attribute__((unused)),
- int lock_type __attribute__((unused)))
- {
- return 0;
- }
-
+ /* These are only called from sql_select for internal temporary tables */
virtual int write_row(byte *buf __attribute__((unused)))
{
return HA_ERR_WRONG_COMMAND;
@@ -1518,6 +1433,24 @@ private:
{
return HA_ERR_WRONG_COMMAND;
}
+ /*
+ use_hidden_primary_key() is called in case of an update/delete when
+ (table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is set
+ but we don't have a primary key
+ */
+ virtual void use_hidden_primary_key();
+
+private:
+ /*
+ Row-level primitives for storage engines. These should be
+ overridden by the storage engine class. To call these methods, use
+ the corresponding 'ha_*' method above.
+ */
+ virtual int external_lock(THD *thd __attribute__((unused)),
+ int lock_type __attribute__((unused)))
+ {
+ return 0;
+ }
};
/* Some extern variables used with handlers */
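[Illustration, not part of the patch: the changed handlerton::create() signature and the new init() hook pair up roughly as below; example_create and ha_example are hypothetical. handler derives from Sql_alloc, so placement new on a MEM_ROOT is available, and the factory caller is expected to invoke init() so table_flags() gets cached.]

    static handler *example_create(TABLE_SHARE *share, MEM_ROOT *mem_root)
    {
      return new (mem_root) ha_example(share);
    }

    /* Caller side, sketched: */
    handler *file= hton->create(share, mem_root);
    if (file)
      file->init();                       /* fills cached_table_flags */
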
diff --git a/sql/item.cc b/sql/item.cc
index f778f0cb38e..b663018faac 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -550,6 +550,23 @@ bool Item_field::find_item_in_field_list_processor(byte *arg)
}
+/*
+ Mark field in read_map
+
+ NOTES
+ This is used by filesort to register used fields in a temporary
+ column read set or to register used fields in a view
+*/
+
+bool Item_field::register_field_in_read_map(byte *arg)
+{
+ TABLE *table= (TABLE *) arg;
+ if (field->table == table || !table)
+ bitmap_set_bit(field->table->read_set, field->field_index);
+ return 0;
+}
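[Illustration, not part of the patch: with the widened walk() signature (see the sql/item.h hunk below), a sort expression can register its columns like this; sort_item is a placeholder Item*.]

    /* Mark every column the expression touches in table->read_set */
    sort_item->walk(&Item::register_field_in_read_map, 0, (byte*) table);
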
+
+
bool Item::check_cols(uint c)
{
if (c != 1)
@@ -789,14 +806,25 @@ CHARSET_INFO *Item::default_charset()
}
+/*
+ Save value in field, but don't give any warnings
+
+ NOTES
+ This is used to temporarily store and retrieve a value in a column,
+ for example in opt_range to adjust the key value to fit the column.
+*/
+
int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
{
int res;
- THD *thd= field->table->in_use;
+ TABLE *table= field->table;
+ THD *thd= table->in_use;
enum_check_fields tmp= thd->count_cuted_fields;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
res= save_in_field(field, no_conversions);
thd->count_cuted_fields= tmp;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return res;
}
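[Editorial note, not part of the patch: the save/use-all/restore idiom above recurs throughout this changeset (see also convert_constant_item() and key_unpack() below). Sketch of the bare pattern; the dbug_tmp_* pair is expected to compile away in non-debug builds, as the column maps are only enforced under DBUG.]

    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
    /* ... access columns that may not be marked in write_set ... */
    dbug_tmp_restore_column_map(table->write_set, old_map);
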
@@ -2365,7 +2393,8 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry)
CHARSET_INFO *tocs= thd->variables.collation_connection;
uint32 dummy_offset;
- value.cs_info.character_set_of_placeholder= fromcs;
+ value.cs_info.character_set_of_placeholder=
+ value.cs_info.character_set_client= fromcs;
/*
Setup source and destination character sets so that they
are different only if conversion is necessary: this will
@@ -3568,7 +3597,8 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
Item** res= find_item_in_list(this, thd->lex->current_select->item_list,
&counter, REPORT_EXCEPT_NOT_FOUND,
&not_used);
- if (res != (Item **)not_found_item && (*res)->type() == Item::FIELD_ITEM)
+ if (res != (Item **)not_found_item &&
+ (*res)->type() == Item::FIELD_ITEM)
{
set_field((*((Item_field**)res))->field);
return 0;
@@ -3587,7 +3617,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if it is not expression from merged VIEW we will set this field.
We can leave expression substituted from view for next PS/SP rexecution
- (i.e. do not register this substitution for reverting on cleupup()
+ (i.e. do not register this substitution for reverting on cleanup()
(register_item_tree_changing())), because this subtree will be
fix_field'ed during setup_tables()->setup_underlying() (i.e. before
all other expressions of query, and references on tables which do
@@ -3599,13 +3629,13 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
return FALSE;
if (!outer_fixed && cached_table && cached_table->select_lex &&
- context->select_lex &&
- cached_table->select_lex != context->select_lex)
+ context->select_lex &&
+ cached_table->select_lex != context->select_lex)
{
int ret;
if ((ret= fix_outer_field(thd, &from_field, reference)) < 0)
goto error;
- else if (!ret)
+ if (!ret)
return FALSE;
}
@@ -3616,17 +3646,28 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
thd->lex->current_select->nest_level);
}
- else if (thd->set_query_id)
+ else if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
TABLE *table= field->table;
- table->file->ha_set_bit_in_rw_set(field->fieldnr,
- (bool)(thd->set_query_id-1));
- if (field->query_id != thd->query_id)
+ MY_BITMAP *current_bitmap, *other_bitmap;
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
{
- /* We only come here in unions */
- field->query_id=thd->query_id;
- table->used_fields++;
- table->used_keys.intersect(field->part_of_key);
+ current_bitmap= table->read_set;
+ other_bitmap= table->write_set;
+ }
+ else
+ {
+ current_bitmap= table->write_set;
+ other_bitmap= table->read_set;
+ }
+ if (!bitmap_fast_test_and_set(current_bitmap, field->field_index))
+ {
+ if (!bitmap_is_set(other_bitmap, field->field_index))
+ {
+ /* First usage of column */
+ table->used_fields++; // Used to optimize loops
+ table->used_keys.intersect(field->part_of_key);
+ }
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -5388,17 +5429,17 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
- bool save_set_query_id= thd->set_query_id;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
/* TODO: Think more about consequences of this step. */
- thd->set_query_id= 0;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found
set field_idx properly.
*/
(void)find_field_in_table(thd, table, field_name, (uint) strlen(field_name),
0, &field_idx);
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
triggers= table->triggers;
table_grants= table_grant_info;
}
diff --git a/sql/item.h b/sql/item.h
index f73017563dd..64989725e31 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -721,7 +721,7 @@ public:
static CHARSET_INFO *default_charset();
virtual CHARSET_INFO *compare_collation() { return NULL; }
- virtual bool walk(Item_processor processor, byte *arg)
+ virtual bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
return (this->*processor)(arg);
}
@@ -743,7 +743,7 @@ public:
virtual bool collect_item_field_processor(byte * arg) { return 0; }
virtual bool find_item_in_field_list_processor(byte *arg) { return 0; }
virtual bool change_context_processor(byte *context) { return 0; }
- virtual bool reset_query_id_processor(byte *query_id) { return 0; }
+ virtual bool register_field_in_read_map(byte *arg) { return 0; }
virtual Item *equal_fields_propagator(byte * arg) { return this; }
virtual Item *set_no_const_sub(byte *arg) { return this; }
@@ -1195,13 +1195,7 @@ public:
Item *get_tmp_table_item(THD *thd);
bool collect_item_field_processor(byte * arg);
bool find_item_in_field_list_processor(byte *arg);
- bool reset_query_id_processor(byte *arg)
- {
- field->query_id= *((query_id_t *) arg);
- if (result_field)
- result_field->query_id= field->query_id;
- return 0;
- }
+ bool register_field_in_read_map(byte *arg);
void cleanup();
Item_equal *find_item_equal(COND_EQUAL *cond_equal);
Item *equal_fields_propagator(byte *arg);
@@ -1808,8 +1802,8 @@ public:
{
return ref ? (*ref)->real_item() : this;
}
- bool walk(Item_processor processor, byte *arg)
- { return (*ref)->walk(processor, arg); }
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg)
+ { return (*ref)->walk(processor, walk_subquery, arg); }
void print(String *str);
void cleanup();
Item_field *filed_for_view_update()
@@ -2058,9 +2052,9 @@ public:
int save_in_field(Field *field_arg, bool no_conversions);
table_map used_tables() const { return (table_map)0L; }
- bool walk(Item_processor processor, byte *args)
+ bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
- return arg->walk(processor, args) ||
+ return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
@@ -2105,9 +2099,9 @@ public:
}
table_map used_tables() const { return (table_map)0L; }
- bool walk(Item_processor processor, byte *args)
+ bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
- return arg->walk(processor, args) ||
+ return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
};
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index eb26f7ff960..ed08e413875 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -216,23 +216,31 @@ longlong Item_func_nop_all::val_int()
static bool convert_constant_item(THD *thd, Field *field, Item **item)
{
+ int result= 0;
if ((*item)->const_item())
{
/* For comparison purposes allow invalid dates like 2000-01-32 */
- ulong orig_sql_mode= field->table->in_use->variables.sql_mode;
- field->table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
+ TABLE *table= field->table;
+ ulong orig_sql_mode= table->in_use->variables.sql_mode;
+ my_bitmap_map *old_write_map=
+ dbug_tmp_use_all_columns(table, table->write_set);
+ my_bitmap_map *old_read_map=
+ dbug_tmp_use_all_columns(table, table->read_set);
+
+ table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
if (!(*item)->save_in_field(field, 1) && !((*item)->null_value))
{
- Item *tmp=new Item_int_with_ref(field->val_int(), *item,
- test(field->flags & UNSIGNED_FLAG));
- field->table->in_use->variables.sql_mode= orig_sql_mode;
+ Item *tmp= new Item_int_with_ref(field->val_int(), *item,
+ test(field->flags & UNSIGNED_FLAG));
if (tmp)
thd->change_item_tree(item, tmp);
- return 1; // Item was replaced
+ result= 1; // Item was replaced
}
- field->table->in_use->variables.sql_mode= orig_sql_mode;
+ table->in_use->variables.sql_mode= orig_sql_mode;
+ dbug_tmp_restore_column_map(table->write_set, old_write_map);
+ dbug_tmp_restore_column_map(table->read_set, old_read_map);
}
- return 0;
+ return result;
}
@@ -2587,14 +2595,14 @@ Item_cond::fix_fields(THD *thd, Item **ref)
return FALSE;
}
-bool Item_cond::walk(Item_processor processor, byte *arg)
+bool Item_cond::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item> li(list);
Item *item;
while ((item= li++))
- if (item->walk(processor, arg))
+ if (item->walk(processor, walk_subquery, arg))
return 1;
- return Item_func::walk(processor, arg);
+ return Item_func::walk(processor, walk_subquery, arg);
}
@@ -3840,14 +3848,16 @@ void Item_equal::fix_length_and_dec()
eval_item->cmp_charset= cmp_collation.collation;
}
-bool Item_equal::walk(Item_processor processor, byte *arg)
+bool Item_equal::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item_field> it(fields);
Item *item;
while ((item= it++))
- if (item->walk(processor, arg))
+ {
+ if (item->walk(processor, walk_subquery, arg))
return 1;
- return Item_func::walk(processor, arg);
+ }
+ return Item_func::walk(processor, walk_subquery, arg);
}
Item *Item_equal::transform(Item_transformer transformer, byte *arg)
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 1cfdcef02d0..c0a76d6f1e7 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1164,7 +1164,7 @@ public:
COND **conds);
void top_level_item() { abort_on_null=1; }
void copy_andor_arguments(THD *thd, Item_cond *item);
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);
@@ -1277,7 +1277,7 @@ public:
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
void update_used_tables();
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void print(String *str);
CHARSET_INFO *compare_collation()
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 9281a8a1ddf..6bec261882f 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -193,14 +193,16 @@ Item_func::fix_fields(THD *thd, Item **ref)
return FALSE;
}
-bool Item_func::walk (Item_processor processor, byte *argument)
+
+bool Item_func::walk(Item_processor processor, bool walk_subquery,
+ byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
- if ((*arg)->walk(processor, argument))
+ if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}
@@ -4367,7 +4369,7 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
return TRUE;
}
table=((Item_field *)item)->field->table;
- if (!(table->file->table_flags() & HA_CAN_FULLTEXT))
+ if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT))
{
my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
return 1;
diff --git a/sql/item_func.h b/sql/item_func.h
index a91d93be8c6..4ccffe461bb 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -182,7 +182,7 @@ public:
{
return agg_item_charsets(c, func_name(), items, nitems, flags);
}
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser traverser,
void * arg, traverse_order order);
diff --git a/sql/item_row.cc b/sql/item_row.cc
index f5c8d511025..c7b678323a8 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -142,16 +142,18 @@ void Item_row::print(String *str)
str->append(')');
}
-bool Item_row::walk(Item_processor processor, byte *arg)
+
+bool Item_row::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
for (uint i= 0; i < arg_count; i++)
{
- if (items[i]->walk(processor, arg))
+ if (items[i]->walk(processor, walk_subquery, arg))
return 1;
}
return (this->*processor)(arg);
}
+
Item *Item_row::transform(Item_transformer transformer, byte *arg)
{
for (uint i= 0; i < arg_count; i++)
diff --git a/sql/item_row.h b/sql/item_row.h
index d6dd4371372..39913086e8d 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -68,7 +68,7 @@ public:
void update_used_tables();
void print(String *str);
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
uint cols() { return arg_count; }
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 7d7b62df0dc..7b4b7fe8fa5 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -446,10 +446,10 @@ public:
void update_used_tables();
const char *func_name() const { return "make_set"; }
- bool walk(Item_processor processor, byte *arg)
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
- return item->walk(processor, arg) ||
- Item_str_func::walk(processor, arg);
+ return item->walk(processor, walk_subquery, arg) ||
+ Item_str_func::walk(processor, walk_subquery, arg);
}
Item *transform(Item_transformer transformer, byte *arg)
{
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 6c2ff19825f..c3e826af27e 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -194,6 +194,46 @@ err:
return res;
}
+
+bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
+ byte *argument)
+{
+
+ if (walk_subquery)
+ {
+ for (SELECT_LEX *lex= unit->first_select(); lex; lex= lex->next_select())
+ {
+ List_iterator<Item> li(lex->item_list);
+ Item *item;
+ ORDER *order;
+
+ if (lex->where && (lex->where)->walk(processor, walk_subquery, argument))
+ return 1;
+ if (lex->having && (lex->having)->walk(processor, walk_subquery,
+ argument))
+ return 1;
+
+ while ((item=li++))
+ {
+ if (item->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ for (order= (ORDER*) lex->order_list.first ; order; order= order->next)
+ {
+ if ((*order->item)->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ for (order= (ORDER*) lex->group_list.first ; order; order= order->next)
+ {
+ if ((*order->item)->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ }
+ }
+ return (this->*processor)(argument);
+}
+
+
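[Illustration, not part of the patch: walk_subquery=1 makes a processor descend into the item lists, WHERE/HAVING and ORDER/GROUP expressions of each subselect, so a caller can collect fields across subqueries; cond is a placeholder Item*.]

    List<Item_field> fields;
    cond->walk(&Item::collect_item_field_processor, 1, (byte*) &fields);
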
bool Item_subselect::exec()
{
int res;
@@ -373,7 +413,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
as far as we moved content to upper level, field which depend of
'upper' select is not really dependent => we remove this dependence
*/
- substitution->walk(&Item::remove_dependence_processor,
+ substitution->walk(&Item::remove_dependence_processor, 0,
(byte *) select_lex->outer_select());
/* SELECT without FROM clause can't have WHERE or HAVING clause */
DBUG_ASSERT(join->conds == 0 && join->having == 0);
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index a4dac5bda87..16966718c2f 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -121,6 +121,7 @@ public:
*/
virtual void reset_value_registration() {}
enum_parsing_place place() { return parsing_place; }
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
friend class select_subselect;
friend class Item_in_optimizer;
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 50c22495463..1285e842769 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -353,14 +353,15 @@ Item *Item_sum::get_tmp_table_item(THD *thd)
}
-bool Item_sum::walk (Item_processor processor, byte *argument)
+bool Item_sum::walk (Item_processor processor, bool walk_subquery,
+ byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
- if ((*arg)->walk(processor, argument))
+ if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}
@@ -734,7 +735,7 @@ static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
static int item_sum_distinct_walk(void *element, element_count num_of_dups,
void *item)
{
- return ((Item_sum_distinct*) (item))->unique_walk_function(element);
+ return ((Item_sum_distinct*) (item))->unique_walk_function(element);
}
C_MODE_END
@@ -2688,7 +2689,7 @@ longlong Item_sum_count_distinct::val_int()
return (longlong) count;
}
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
- return table->file->records;
+ return table->file->stats.records;
}
diff --git a/sql/item_sum.h b/sql/item_sum.h
index f4ff257aa4e..a0cd08dcb11 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -312,7 +312,7 @@ public:
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
- bool walk (Item_processor processor, byte *argument);
+ bool walk(Item_processor processor, bool walk_subquery, byte *argument);
bool init_sum_func_check(THD *thd);
bool check_sum_func(THD *thd, Item **ref);
bool register_sum_func(THD *thd, Item **ref);
diff --git a/sql/key.cc b/sql/key.cc
index a407fff4840..11dd267875f 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -301,14 +301,26 @@ bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length)
return 0;
}
- /* unpack key-fields from record to some buffer */
- /* This is used to get a good error message */
+/*
+ Unpack key fields from record to some buffer
+
+ SYNOPSIS
+ key_unpack()
+ to Store value here in an easy-to-read form
+ table Table to use
+ idx Key number
+
+ NOTES
+ This is used mainly to get a good error message.
+ We temporarily change the column bitmap so that all columns are readable.
+*/
void key_unpack(String *to,TABLE *table,uint idx)
{
KEY_PART_INFO *key_part,*key_part_end;
Field *field;
String tmp;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("key_unpack");
to->length(0);
@@ -337,6 +349,7 @@ void key_unpack(String *to,TABLE *table,uint idx)
else
to->append(STRING_WITH_LEN("???"));
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_VOID_RETURN;
}
@@ -373,7 +386,7 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
key is not updated
*/
if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
return check_if_key_used(table, table->s->primary_key, fields);
return 0;
}
diff --git a/sql/log.cc b/sql/log.cc
index 7c8f314bc08..a90240ce19d 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -201,8 +201,10 @@ bool Log_to_csv_event_handler::open_log_table(uint log_type)
table->table->file->ha_rnd_init(0))
error= TRUE;
else
+ {
+ table->table->use_all_columns();
table->table->locked_by_logger= TRUE;
-
+ }
/* restore thread settings */
if (curr)
curr->store_globals();
@@ -1172,7 +1174,8 @@ static int binlog_rollback(THD *thd, bool all)
table. Such cases should be rare (updating a
non-transactional table inside a transaction...)
*/
- if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
+ if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG)))
{
Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
@@ -1233,7 +1236,8 @@ static int binlog_savepoint_rollback(THD *thd, void *sv)
non-transactional table. Otherwise, truncate the binlog cache starting
from the SAVEPOINT command.
*/
- if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
+ if (unlikely(thd->options &
+ (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)))
{
int const error=
thd->binlog_query(THD::STMT_QUERY_TYPE,
@@ -2659,8 +2663,9 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{
int error;
DBUG_ENTER("THD::binlog_write_table_map");
- DBUG_PRINT("enter", ("table: %p (%s: #%u)",
- table, table->s->table_name, table->s->table_map_id));
+ DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)",
+ (long) table, table->s->table_name,
+ table->s->table_map_id));
/* Pre-conditions */
DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
@@ -2673,7 +2678,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
the_event(this, table, table->s->table_map_id, is_trans, flags);
if (is_trans)
- trans_register_ha(this, options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN),
+ trans_register_ha(this,
+ (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
&binlog_hton);
if ((error= mysql_bin_log.write(&the_event)))
@@ -2884,7 +2890,7 @@ bool MYSQL_LOG::write(Log_event *event_info)
if (event_info->get_cache_stmt() && !trans_log_in_use)
trans_register_ha(thd,
(thd->options &
- (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)),
+ (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
&binlog_hton);
if (event_info->get_cache_stmt() || trans_log_in_use)
{
@@ -4365,5 +4371,6 @@ mysql_declare_plugin(binlog)
NULL, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
+ 0
}
mysql_declare_plugin_end;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index d51a0ef4c9f..79c7d394276 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1744,21 +1744,22 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
{
if (flags2_inited)
/*
- all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG must
- take their value from flags2.
+ all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG
+ must take their value from flags2.
*/
- thd->options= flags2|(thd->options & ~(ulong)OPTIONS_WRITTEN_TO_BIN_LOG);
+ thd->options= flags2|(thd->options & ~OPTIONS_WRITTEN_TO_BIN_LOG);
/*
else, we are in a 3.23/4.0 binlog; we previously received a
- Rotate_log_event which reset thd->options and sql_mode etc, so nothing to do.
+ Rotate_log_event which reset thd->options and sql_mode etc, so
+ nothing to do.
*/
/*
We do not replicate IGNORE_DIR_IN_CREATE. That is, if the master is a
slave which runs with SQL_MODE=IGNORE_DIR_IN_CREATE, this should not
force us to ignore the dir too. Imagine you are a ring of machines, and
- one has a disk problem so that you temporarily need IGNORE_DIR_IN_CREATE
- on this machine; you don't want it to propagate elsewhere (you don't want
- all slaves to start ignoring the dirs).
+ one has a disk problem so that you temporarily need
+ IGNORE_DIR_IN_CREATE on this machine; you don't want it to propagate
+ elsewhere (you don't want all slaves to start ignoring the dirs).
*/
if (sql_mode_inited)
thd->variables.sql_mode=
@@ -3264,8 +3265,8 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli)
rli->notify_group_master_log_name_update();
rli->group_master_log_pos= pos;
rli->group_relay_log_pos= rli->event_relay_log_pos;
- DBUG_PRINT("info", ("group_master_log_name: '%s' group_master_log_pos:\
-%lu",
+ DBUG_PRINT("info", ("group_master_log_name: '%s' "
+ "group_master_log_pos: %lu",
rli->group_master_log_name,
(ulong) rli->group_master_log_pos));
/*
@@ -5212,8 +5213,9 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
log only the primary key value instead of the entire "before image". This
would save binlog space. TODO
*/
- DBUG_ENTER("Rows_log_event::do_add_row_data(byte *data, my_size_t length)");
- DBUG_PRINT("enter", ("row_data= %p, length= %lu", row_data, length));
+ DBUG_ENTER("Rows_log_event::do_add_row_data");
+ DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
+ length));
DBUG_DUMP("row_data", (const char*)row_data, min(length, 32));
DBUG_ASSERT(m_rows_buf <= m_rows_cur);
@@ -5268,7 +5270,7 @@ static char const *unpack_row(TABLE *table,
{
DBUG_ASSERT(record && row);
- MY_BITMAP *write_set= table->file->write_set;
+ MY_BITMAP *write_set= table->write_set;
my_size_t const n_null_bytes= table->s->null_bytes;
my_ptrdiff_t const offset= record - (byte*) table->record[0];
@@ -5281,13 +5283,13 @@ static char const *unpack_row(TABLE *table,
{
Field *const f= *field_ptr;
- if (bitmap_is_set(cols, field_ptr - begin_ptr))
+ if (bitmap_is_set(cols, (uint) (field_ptr - begin_ptr)))
{
/* Field...::unpack() cannot return 0 */
ptr= f->unpack(f->ptr + offset, ptr);
}
else
- bitmap_clear_bit(write_set, (field_ptr - begin_ptr) + 1);
+ bitmap_clear_bit(write_set, (uint) (field_ptr - begin_ptr));
}
return ptr;
}
@@ -5443,7 +5445,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
error= do_before_row_operations(table);
- while (error == 0 && row_start < (const char*)m_rows_end) {
+ while (error == 0 && row_start < (const char*) m_rows_end)
+ {
char const *row_end= do_prepare_row(thd, table, row_start);
DBUG_ASSERT(row_end != NULL); // cannot happen
DBUG_ASSERT(row_end <= (const char*)m_rows_end);
@@ -5478,8 +5481,10 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
rli->abort_slave=1;);
error= do_after_row_operations(table, error);
if (!cache_stmt)
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
-
+ {
+ DBUG_PRINT("info", ("Marked that we need to keep log"));
+ thd->options|= OPTION_KEEP_LOG;
+ }
}
if (error)
@@ -6267,9 +6272,9 @@ replace_record(THD *thd, TABLE *table)
- use index_read_idx() with the key that is duplicated, to
retrieve the offending row.
*/
- if (table->file->table_flags() & HA_DUPP_POS)
+ if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
- error= table->file->rnd_pos(table->record[1], table->file->dupp_ref);
+ error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
if (error)
return error;
}
@@ -6386,16 +6391,17 @@ static bool record_compare(TABLE *table)
to find (and fetch) the row. If the engine allows random access of the
records, a combination of position() and rnd_pos() will be used.
*/
+
static int find_and_fetch_row(TABLE *table, byte *key)
{
DBUG_ENTER("find_and_fetch_row(TABLE *table, byte *key, byte *record)");
- DBUG_PRINT("enter", ("table=%p, key=%p, record=%p",
- table, key, table->record[1]));
+ DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx",
+ (long) table, (long) key, (long) table->record[1]));
DBUG_ASSERT(table->in_use != NULL);
- if ((table->file->table_flags() & HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS)
- && table->s->primary_key < MAX_KEY)
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
+ table->s->primary_key < MAX_KEY)
{
/*
Use a more efficient method to fetch the record given by
@@ -6411,17 +6417,18 @@ static int find_and_fetch_row(TABLE *table, byte *key)
DBUG_ASSERT(table->record[1]);
/* We need to retrieve all fields */
- table->file->ha_set_all_bits_in_read_set();
+ /* TODO: Move this out from this function to main loop */
+ table->use_all_columns();
if (table->s->keys > 0)
{
int error;
- /*
- We need to set the null bytes to ensure that the filler bit
- are all set when returning. There are storage engines that
- just set the necessary bits on the bytes and don't set the
- filler bits correctly.
- */
+ /*
+ We need to set the null bytes to ensure that the filler bits
+ are all set when returning. There are storage engines that
+ just set the necessary bits on the bytes and don't set the
+ filler bits correctly.
+ */
my_ptrdiff_t const pos=
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
table->record[1][pos]= 0xFF;
@@ -6446,7 +6453,7 @@ static int find_and_fetch_row(TABLE *table, byte *key)
comparison of non-PK columns to decide if the correct record is
found. I can see no scenario where it would be incorrect to
chose the row to change only using a PK or an UNNI.
- */
+ */
if (table->key_info->flags & HA_NOSAME)
DBUG_RETURN(0);
@@ -6550,7 +6557,7 @@ int Delete_rows_log_event::do_before_row_operations(TABLE *table)
{
DBUG_ASSERT(m_memory == NULL);
- if ((table->file->table_flags() & HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS) &&
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
table->s->primary_key < MAX_KEY)
{
/*
@@ -6624,24 +6631,23 @@ char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table,
int Delete_rows_log_event::do_exec_row(TABLE *table)
{
+ int error;
DBUG_ASSERT(table != NULL);
if (table->s->keys > 0)
{
/* We have a key: search the table using the index */
- if (!table->file->inited)
- if (int error= table->file->ha_index_init(0, FALSE))
- return error;
+ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
+ return error;
}
else
{
/* We doesn't have a key: search the table using rnd_next() */
- if (int error= table->file->ha_rnd_init(1))
+ if ((error= table->file->ha_rnd_init(1)))
return error;
}
- int error= find_and_fetch_row(table, m_key);
- if (error)
+ if ((error= find_and_fetch_row(table, m_key)))
return error;
/*
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 93db68d1b50..7789df0da3d 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -278,9 +278,9 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#define OPTION_BEGIN (LL(1) << 20) // THD, intern
#define OPTION_TABLE_LOCK (LL(1) << 21) // THD, intern
#define OPTION_QUICK (LL(1) << 22) // SELECT (for DELETE)
+#define OPTION_KEEP_LOG (LL(1) << 23) // Keep binlog on rollback
-/* Thr following is used to detect a conflict with DISTINCT
- in the user query has requested */
+/* The following is used to detect a conflict with DISTINCT */
#define SELECT_ALL (LL(1) << 24) // SELECT, user, parser
/* Set if we are updating a non-transaction safe table */
@@ -1109,20 +1109,21 @@ bool insert_fields(THD *thd, Name_resolution_context *context,
List_iterator<Item> *it, bool any_privileges);
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
- Item **conds, TABLE_LIST **leaves, bool select_insert);
+ TABLE_LIST **leaves, bool select_insert);
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array,
- List<Item> &item, ulong set_query_id,
+ List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func);
inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array,
- List<Item> &item, ulong set_query_id,
- List<Item> *sum_func_list,
- bool allow_sum_func)
+ List<Item> &item,
+ enum_mark_columns mark_used_columns,
+ List<Item> *sum_func_list,
+ bool allow_sum_func)
{
bool res;
thd->lex->select_lex.no_wrap_view_item= TRUE;
- res= setup_fields(thd, ref_pointer_array, item, set_query_id, sum_func_list,
+ res= setup_fields(thd, ref_pointer_array, item, mark_used_columns, sum_func_list,
allow_sum_func);
thd->lex->select_lex.no_wrap_view_item= FALSE;
return res;
@@ -1764,7 +1765,8 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
void end_read_record(READ_RECORD *info);
ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder,
uint s_length, SQL_SELECT *select,
- ha_rows max_rows, ha_rows *examined_rows);
+ ha_rows max_rows, bool sort_positions,
+ ha_rows *examined_rows);
void filesort_free_buffers(TABLE *table);
void change_double_for_sort(double nr,byte *to);
double my_double_round(double value, int dec, bool truncate);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 0bae436cf78..7f57b3fc425 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2447,10 +2447,12 @@ static int my_message_sql(uint error, const char *str, myf MyFlags)
if (thd->lex->current_select &&
thd->lex->current_select->no_error && !thd->is_fatal_error)
{
- DBUG_PRINT("error", ("Error converted to warning: current_select: no_error %d fatal_error: %d",
- (thd->lex->current_select ?
- thd->lex->current_select->no_error : 0),
- (int) thd->is_fatal_error));
+ DBUG_PRINT("error",
+ ("Error converted to warning: current_select: no_error %d "
+ "fatal_error: %d",
+ (thd->lex->current_select ?
+ thd->lex->current_select->no_error : 0),
+ (int) thd->is_fatal_error));
}
else
{
@@ -5661,7 +5663,6 @@ log and this option does nothing anymore.",
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0},
- /* QQ: The following should be removed soon! (bdb_max_lock preferred) */
{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index e4eb6e8ab3f..e2b5bdeacc7 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -820,6 +820,10 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
:dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0)
{
+ my_bitmap_map *bitmap;
+ DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
+
+ in_ror_merged_scan= 0;
sorted= 0;
index= key_nr;
head= table;
@@ -843,6 +847,19 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bzero((char*) &alloc,sizeof(alloc));
file= head->file;
record= head->record[0];
+ save_read_set= head->read_set;
+ save_write_set= head->write_set;
+
+ /* Allocate a bitmap for used columns */
+ if (!(bitmap= (my_bitmap_map*) my_malloc(head->s->column_bitmap_size,
+ MYF(MY_WME))))
+ {
+ column_bitmap.bitmap= 0;
+ error= 1;
+ }
+ else
+ bitmap_init(&column_bitmap, bitmap, head->s->fields, FALSE);
+ DBUG_VOID_RETURN;
}
@@ -872,24 +889,26 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
if (file)
{
range_end();
- file->extra(HA_EXTRA_NO_KEYREAD);
if (free_file)
{
DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file,
free_file));
- file->ha_reset();
file->ha_external_lock(current_thd, F_UNLCK);
file->close();
delete file;
}
+ else
+ {
+ file->extra(HA_EXTRA_NO_KEYREAD);
+ }
}
delete_dynamic(&ranges); /* ranges are allocated in alloc */
free_root(&alloc,MYF(0));
+ my_free((char*) column_bitmap.bitmap, MYF(MY_ALLOW_ZERO_PTR));
}
- if (multi_range)
- my_free((char*) multi_range, MYF(0));
- if (multi_range_buff)
- my_free((char*) multi_range_buff, MYF(0));
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ x_free(multi_range);
+ x_free(multi_range_buff);
DBUG_VOID_RETURN;
}
@@ -1009,20 +1028,21 @@ int QUICK_ROR_INTERSECT_SELECT::init()
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
- handler *save_file= file;
+ handler *save_file= file, *org_file;
THD *thd;
+ MY_BITMAP *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
+ in_ror_merged_scan= 1;
if (reuse_handler)
{
- DBUG_PRINT("info", ("Reusing handler %p", file));
- if (file->extra(HA_EXTRA_KEYREAD) ||
- file->ha_retrieve_all_pk() ||
- init() || reset())
+ DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file));
+ if (init() || reset())
{
DBUG_RETURN(1);
}
- DBUG_RETURN(0);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+ goto end;
}
/* Create a separate handler object for this quick select */
@@ -1035,19 +1055,20 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
thd= head->in_use;
if (!(file= get_new_handler(head->s, thd->mem_root, head->s->db_type)))
goto failure;
- DBUG_PRINT("info", ("Allocated new handler %p", file));
+ DBUG_PRINT("info", ("Allocated new handler 0x%lx", (long) file));
if (file->ha_open(head, head->s->normalized_path.str, head->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
/* Caller will free the memory */
goto failure;
}
+
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
if (file->ha_external_lock(thd, F_RDLCK))
goto failure;
- if (file->extra(HA_EXTRA_KEYREAD) ||
- file->ha_retrieve_all_pk() ||
- init() || reset())
+ if (init() || reset())
{
file->ha_external_lock(thd, F_UNLCK);
file->close();
@@ -1055,11 +1076,28 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
}
free_file= TRUE;
last_rowid= file->ref;
+
+end:
+ /*
+    We are only going to read key fields and call position() on 'file'.
+    The following sets head->tmp_set to only use this key and then updates
+    head->read_set and head->write_set to use this bitmap.
+    The new bitmap is stored in 'column_bitmap', which is used in ::get_next().
+ */
+ org_file= head->file;
+ head->file= file;
+ /* We don't have to set 'head->keyread' here as the 'file' is unique */
+ head->mark_columns_used_by_index(index);
+ head->prepare_for_position();
+ head->file= org_file;
+ bitmap_copy(&column_bitmap, head->read_set);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
DBUG_RETURN(0);
failure:
- if (file)
- delete file;
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ delete file;
file= save_file;
DBUG_RETURN(1);
}
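
The end: block above relies on a short-lived swap of head->file: while the
separate handler is installed, mark_columns_used_by_index() and
prepare_for_position() fill head->read_set with exactly the index columns plus
whatever position() needs, and that result is snapshotted into column_bitmap.
A condensed sketch of the swap, assuming the TABLE methods shown in the patch
('snapshot_index_read_set' is a hypothetical helper name):

/* Sketch: compute the column set for an index-only ROR scan. */
static void snapshot_index_read_set(TABLE *head, handler *file,
                                    uint index, MY_BITMAP *column_bitmap)
{
  handler *org_file= head->file;
  head->file= file;                        /* temporary swap */
  head->mark_columns_used_by_index(index); /* read_set := index columns */
  head->prepare_for_position();            /* add fields position() needs */
  head->file= org_file;                    /* restore immediately */

  bitmap_copy(column_bitmap, head->read_set);
  head->column_bitmaps_set(column_bitmap, column_bitmap);
}
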
@@ -1764,32 +1802,26 @@ public:
static int fill_used_fields_bitmap(PARAM *param)
{
TABLE *table= param->table;
- param->fields_bitmap_size= bitmap_buffer_size(table->s->fields+1);
- uint32 *tmp;
+ my_bitmap_map *tmp;
uint pk;
- if (!(tmp= (uint32*) alloc_root(param->mem_root,param->fields_bitmap_size)) ||
- bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
- FALSE))
+ param->fields_bitmap_size= table->s->column_bitmap_size;
+ if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)) ||
+ bitmap_init(&param->needed_fields, tmp, table->s->fields, FALSE))
return 1;
- bitmap_clear_all(&param->needed_fields);
- for (uint i= 0; i < table->s->fields; i++)
- {
- if (param->thd->query_id == table->field[i]->query_id)
- bitmap_set_bit(&param->needed_fields, i+1);
- }
+ bitmap_copy(&param->needed_fields, table->read_set);
+ bitmap_union(&param->needed_fields, table->write_set);
pk= param->table->s->primary_key;
- if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY)
+ if (pk != MAX_KEY && param->table->file->primary_key_is_clustered())
{
/* The table uses clustered PK and it is not internally generated */
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
KEY_PART_INFO *key_part_end= key_part +
param->table->key_info[pk].key_parts;
for (;key_part != key_part_end; ++key_part)
- {
- bitmap_clear_bit(&param->needed_fields, key_part->fieldnr);
- }
+ bitmap_clear_bit(&param->needed_fields, key_part->fieldnr-1);
}
return 0;
}
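
fill_used_fields_bitmap() now derives the needed fields directly from the
table's read and write sets instead of walking every field and comparing
query_id values; clustered primary key parts are then removed because a
clustered index delivers them for free. Note the convention change: fieldnr is
1-based while the new bitmaps are 0-based, hence the fieldnr-1. A hedged
sketch of the same computation ('sketch_fill_needed_fields' is a hypothetical
name):

/* Sketch: needed_fields = read_set | write_set, minus clustered PK parts. */
static int sketch_fill_needed_fields(TABLE *table, MY_BITMAP *needed_fields,
                                     MEM_ROOT *mem_root)
{
  my_bitmap_map *buf;
  if (!(buf= (my_bitmap_map*) alloc_root(mem_root,
                                         table->s->column_bitmap_size)) ||
      bitmap_init(needed_fields, buf, table->s->fields, FALSE))
    return 1;
  bitmap_copy(needed_fields, table->read_set);
  bitmap_union(needed_fields, table->write_set);

  uint pk= table->s->primary_key;
  if (pk != MAX_KEY && table->file->primary_key_is_clustered())
  {
    KEY_PART_INFO *kp=  table->key_info[pk].key_part;
    KEY_PART_INFO *end= kp + table->key_info[pk].key_parts;
    for (; kp != end; kp++)
      bitmap_clear_bit(needed_fields, kp->fieldnr - 1); /* fieldnr is 1-based */
  }
  return 0;
}
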
@@ -1841,7 +1873,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
- DBUG_PRINT("info", ("records=%lu", (ulong)head->file->records));
+ DBUG_PRINT("info", ("records: %lu", (ulong) head->file->stats.records));
delete quick;
quick=0;
needed_reg.clear_all();
@@ -1851,7 +1883,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(0); /* purecov: inspected */
if (keys_to_use.is_clear_all())
DBUG_RETURN(0);
- records= head->file->records;
+ records= head->file->stats.records;
if (!records)
records++; /* purecov: inspected */
scan_time= (double) records / TIME_FOR_COMPARE + 1;
@@ -1876,7 +1908,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag=head->file->table_flags();
+ param.baseflag=head->file->ha_table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -2294,6 +2326,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
PART_PRUNE_PARAM prune_param;
MEM_ROOT alloc;
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
+ my_bitmap_map *old_read_set, *old_write_set;
prune_param.part_info= part_info;
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
@@ -2307,6 +2340,8 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
DBUG_RETURN(FALSE);
}
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
range_par->thd= thd;
range_par->table= table;
/* range_par->cond doesn't need initialization */
@@ -2396,6 +2431,8 @@ all_used:
retval= FALSE; // some partitions are used
mark_all_partitions_as_used(prune_param.part_info);
end:
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
thd->no_errors=0;
thd->mem_root= range_par->old_root;
free_root(&alloc,MYF(0)); // Return memory & allocator
@@ -2422,6 +2459,8 @@ end:
void store_key_image_to_rec(Field *field, char *ptr, uint len)
{
/* Do the same as print_key() does */
+ my_bitmap_map *old_map;
+
if (field->real_maybe_null())
{
if (*ptr)
@@ -2432,7 +2471,10 @@ void store_key_image_to_rec(Field *field, char *ptr, uint len)
field->set_notnull();
ptr++;
}
+ old_map= dbug_tmp_use_all_columns(field->table,
+ field->table->write_set);
field->set_key_image(ptr, len);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
}
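
store_key_image_to_rec() writes into the record buffer for a column that may
not be in the statement's write_set, so it widens the map first. The
dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() pair exists for
exactly this: in debug builds it temporarily marks all columns so the
assertions inside Field accessors stay quiet, and in optimized builds it
compiles away. A minimal sketch of the idiom ('debug_safe_store' is a
hypothetical helper):

/* Sketch: write through a Field outside the statement's write_set. */
static void debug_safe_store(Field *field, char *ptr, uint len)
{
  my_bitmap_map *old_map=
    dbug_tmp_use_all_columns(field->table, field->table->write_set);
  field->set_key_image(ptr, len);          /* would assert otherwise */
  dbug_tmp_restore_column_map(field->table->write_set, old_map);
}
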
@@ -2512,11 +2554,11 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
{
MY_BITMAP all_merges;
uint bitmap_bytes;
- uint32 *bitmap_buf;
+ my_bitmap_map *bitmap_buf;
uint n_bits= ppar->part_info->used_partitions.n_bits;
bitmap_bytes= bitmap_buffer_size(n_bits);
- if (!(bitmap_buf= (uint32*)alloc_root(ppar->range_param.mem_root,
- bitmap_bytes)))
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root,
+ bitmap_bytes)))
{
/*
Fallback, process just the first SEL_IMERGE. This can leave us with more
@@ -2762,7 +2804,8 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
uint32 subpart_id;
bitmap_clear_all(&ppar->subparts_bitmap);
- while ((subpart_id= subpart_iter.get_next(&subpart_iter)) != NOT_A_PARTITION_ID)
+ while ((subpart_id= subpart_iter.get_next(&subpart_iter)) !=
+ NOT_A_PARTITION_ID)
bitmap_set_bit(&ppar->subparts_bitmap, subpart_id);
/* Mark each partition as used in each subpartition. */
@@ -2868,7 +2911,8 @@ process_next_key_part:
/* Got "full range" for subpartitioning fields */
uint32 part_id;
bool found= FALSE;
- while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) != NOT_A_PARTITION_ID)
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
{
ppar->mark_full_partition_used(ppar->part_info, part_id);
found= TRUE;
@@ -3015,11 +3059,12 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
if (ppar->subpart_fields)
{
- uint32 *buf;
+ my_bitmap_map *buf;
uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
- if (!(buf= (uint32*)alloc_root(alloc, bufsize)))
+ if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize)))
return TRUE;
- bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts, FALSE);
+ bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts,
+ FALSE);
}
range_par->key_parts= key_part;
Field **field= (ppar->part_fields)? part_info->part_field_array :
@@ -3186,7 +3231,8 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
else
{
double n_blocks=
- ceil(ulonglong2double(param->table->file->data_file_length) / IO_SIZE);
+ ceil(ulonglong2double(param->table->file->stats.data_file_length) /
+ IO_SIZE);
double busy_blocks=
n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records)));
if (busy_blocks < 1.0)
@@ -3355,7 +3401,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost));
if (imerge_too_expensive || (imerge_cost > read_time) ||
- (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) &&
+ (non_cpk_scan_records+cpk_scan_records >= param->table->file->stats.records) &&
read_time != DBL_MAX)
{
/*
@@ -3413,7 +3459,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_trp->read_cost= imerge_cost;
imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
imerge_trp->records= min(imerge_trp->records,
- param->table->file->records);
+ param->table->file->stats.records);
imerge_trp->range_scans= range_scans;
imerge_trp->range_scans_end= range_scans + n_child_scans;
read_time= imerge_cost;
@@ -3474,7 +3520,7 @@ skip_to_ror_scan:
((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
roru_total_records += (*cur_roru_plan)->records;
roru_intersect_part *= (*cur_roru_plan)->records /
- param->table->file->records;
+ param->table->file->stats.records;
}
/*
@@ -3484,7 +3530,7 @@ skip_to_ror_scan:
in disjunction do not share key parts.
*/
roru_total_records -= (ha_rows)(roru_intersect_part*
- param->table->file->records);
+ param->table->file->stats.records);
/* ok, got a ROR read plan for each of the disjuncts
Calculate cost:
cost(index_union_scan(scan_1, ... scan_n)) =
@@ -3545,7 +3591,7 @@ static double get_index_only_read_time(const PARAM* param, ha_rows records,
int keynr)
{
double read_time;
- uint keys_per_block= (param->table->file->block_size/2/
+ uint keys_per_block= (param->table->file->stats.block_size/2/
(param->table->key_info[keynr].key_length+
param->table->file->ref_length) + 1);
read_time=((double) (records+keys_per_block-1)/
@@ -3597,7 +3643,7 @@ static
ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
{
ROR_SCAN_INFO *ror_scan;
- uint32 *bitmap_buf;
+ my_bitmap_map *bitmap_buf;
uint keynr;
DBUG_ENTER("make_ror_scan");
@@ -3612,12 +3658,12 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ror_scan->sel_arg= sel_arg;
ror_scan->records= param->table->quick_rows[keynr];
- if (!(bitmap_buf= (uint32*)alloc_root(param->mem_root,
- param->fields_bitmap_size)))
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
DBUG_RETURN(NULL);
if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,
- param->fields_bitmap_size*8, FALSE))
+ param->table->s->fields, FALSE))
DBUG_RETURN(NULL);
bitmap_clear_all(&ror_scan->covered_fields);
@@ -3626,8 +3672,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
param->table->key_info[keynr].key_parts;
for (;key_part != key_part_end; ++key_part)
{
- if (bitmap_is_set(&param->needed_fields, key_part->fieldnr))
- bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr);
+ if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
+ bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
}
ror_scan->index_read_cost=
get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr],
@@ -3727,21 +3773,21 @@ static
ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
{
ROR_INTERSECT_INFO *info;
- uint32* buf;
+ my_bitmap_map* buf;
if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
sizeof(ROR_INTERSECT_INFO))))
return NULL;
info->param= param;
- if (!(buf= (uint32*)alloc_root(param->mem_root,
- param->fields_bitmap_size)))
+ if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
return NULL;
- if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
+ if (bitmap_init(&info->covered_fields, buf, param->table->s->fields,
FALSE))
return NULL;
info->is_covering= FALSE;
info->index_scan_costs= 0.0;
info->index_records= 0;
- info->out_rows= param->table->file->records;
+ info->out_rows= param->table->file->stats.records;
bitmap_clear_all(&info->covered_fields);
return info;
}
@@ -3860,14 +3906,14 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
SEL_ARG *sel_arg, *tuple_arg= NULL;
bool cur_covered;
bool prev_covered= test(bitmap_is_set(&info->covered_fields,
- key_part->fieldnr));
+ key_part->fieldnr-1));
key_range min_range;
key_range max_range;
min_range.key= (byte*) key_val;
min_range.flag= HA_READ_KEY_EXACT;
max_range.key= (byte*) key_val;
max_range.flag= HA_READ_AFTER_KEY;
- ha_rows prev_records= info->param->table->file->records;
+ ha_rows prev_records= info->param->table->file->stats.records;
DBUG_ENTER("ror_intersect_selectivity");
for (sel_arg= scan->sel_arg; sel_arg;
@@ -3875,7 +3921,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
{
DBUG_PRINT("info",("sel_arg step"));
cur_covered= test(bitmap_is_set(&info->covered_fields,
- key_part[sel_arg->part].fieldnr));
+ key_part[sel_arg->part].fieldnr-1));
if (cur_covered != prev_covered)
{
/* create (part1val, ..., part{n-1}val) tuple. */
@@ -4004,15 +4050,15 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
}
info->total_cost= info->index_scan_costs;
- DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
+ DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
if (!info->is_covering)
{
info->total_cost +=
get_sweep_read_cost(info->param, double2rows(info->out_rows));
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
- DBUG_PRINT("info", ("New out_rows= %g", info->out_rows));
- DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost,
+ DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
+ DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
info->is_covering?"" : "non-"));
DBUG_RETURN(TRUE);
}
@@ -4091,7 +4137,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
double min_cost= DBL_MAX;
DBUG_ENTER("get_best_ror_intersect");
- if ((tree->n_ror_scans < 2) || !param->table->file->records)
+ if ((tree->n_ror_scans < 2) || !param->table->file->stats.records)
DBUG_RETURN(NULL);
/*
@@ -4260,7 +4306,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
F=set of all fields to cover
S={}
- do {
+ do
+ {
Order I by (#covered fields in F desc,
#components asc,
number of first not covered component asc);
@@ -4278,7 +4325,6 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
ROR_SCAN_INFO **ror_scan_mark;
ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
DBUG_ENTER("get_best_covering_ror_intersect");
- uint nbits= param->fields_bitmap_size*8;
for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
(*scan)->key_components=
@@ -4292,9 +4338,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
/*I=set of all covering indexes */
ror_scan_mark= tree->ror_scans;
- uint32 int_buf[MAX_KEY/32+1];
+ my_bitmap_map int_buf[MAX_KEY/(sizeof(my_bitmap_map)*8)+1];
MY_BITMAP covered_fields;
- if (bitmap_init(&covered_fields, int_buf, nbits, FALSE))
+ if (bitmap_init(&covered_fields, int_buf, param->table->s->fields, FALSE))
DBUG_RETURN(0);
bitmap_clear_all(&covered_fields);
@@ -4306,7 +4352,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
"building covering ROR-I",
ror_scan_mark, ror_scans_end););
- do {
+ do
+ {
/*
Update changed sorting info:
#covered fields,
@@ -4542,7 +4589,8 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
if ((quick_intrsect=
new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table,
- retrieve_full_rows? (!is_covering):FALSE,
+ (retrieve_full_rows? (!is_covering) :
+ FALSE),
parent_alloc)))
{
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
@@ -7159,7 +7207,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
goto err;
quick->records= records;
- if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error ||
+ if (cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error ||
!(range= new(alloc) QUICK_RANGE()))
goto err; // out of memory
@@ -7222,10 +7270,9 @@ err:
rowids into Unique, get the sorted sequence and destroy the Unique.
If table has a clustered primary key that covers all rows (TRUE for bdb
- and innodb currently) and one of the index_merge scans is a scan on PK,
- then
- rows that will be retrieved by PK scan are not put into Unique and
- primary key scan is not performed here, it is performed later separately.
+ and innodb currently) and one of the index_merge scans is a scan on PK,
+    then rows that will be retrieved by the PK scan are not put into Unique,
+    and the primary key scan is not performed here; it is performed later
+    separately.
RETURN
0 OK
@@ -7238,21 +7285,17 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
QUICK_RANGE_SELECT* cur_quick;
int result;
Unique *unique;
- DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
+ MY_BITMAP *save_read_set, *save_write_set;
+ handler *file= head->file;
+ DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
/* We're going to just read rowids. */
- if (head->file->extra(HA_EXTRA_KEYREAD))
- DBUG_RETURN(1);
-
- /*
- Make innodb retrieve all PK member fields, so
- * ha_innobase::position (which uses them) call works.
- * We can filter out rows that will be retrieved by clustered PK.
- (This also creates a deficiency - it is possible that we will retrieve
- parts of key that are not used by current query at all.)
- */
- if (head->file->ha_retrieve_all_pk())
- DBUG_RETURN(1);
+ save_read_set= head->read_set;
+ save_write_set= head->write_set;
+ file->extra(HA_EXTRA_KEYREAD);
+ bitmap_clear_all(&head->tmp_set);
+ head->column_bitmaps_set(&head->tmp_set, &head->tmp_set);
+ head->prepare_for_position();
cur_quick_it.rewind();
cur_quick= cur_quick_it++;
@@ -7265,8 +7308,8 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
if (cur_quick->init() || cur_quick->reset())
DBUG_RETURN(1);
- unique= new Unique(refpos_order_cmp, (void *)head->file,
- head->file->ref_length,
+ unique= new Unique(refpos_order_cmp, (void *)file,
+ file->ref_length,
thd->variables.sortbuff_size);
if (!unique)
DBUG_RETURN(1);
@@ -7309,15 +7352,16 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
}
+ DBUG_PRINT("info", ("ok"));
/* ok, all row ids are in Unique */
result= unique->get(head);
delete unique;
doing_pk_scan= FALSE;
+ /* index_merge currently doesn't support "using index" at all */
+ file->extra(HA_EXTRA_NO_KEYREAD);
+ head->column_bitmaps_set(save_read_set, save_write_set);
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
- /* index_merge currently doesn't support "using index" at all */
- head->file->extra(HA_EXTRA_NO_KEYREAD);
-
DBUG_RETURN(result);
}
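
The merge step above collects rowids from every quick select into a Unique,
which keeps them sorted and de-duplicated (spilling to disk when the sort
buffer fills), and then materializes the sequence for the final table scan
with unique->get(). A trimmed sketch of that usage, assuming the constructor
arguments shown in the patch ('merge_rowids' is a hypothetical name):

/* Sketch: de-duplicate rowids from several index scans with Unique. */
static int merge_rowids(THD *thd, TABLE *head, handler *file)
{
  Unique *unique= new Unique(refpos_order_cmp, (void*) file,
                             file->ref_length,
                             thd->variables.sortbuff_size);
  if (!unique)
    return 1;
  /*
    For each row returned by each quick select:
      file->position(head->record[0]);
      unique->unique_add(file->ref);
  */
  int result= unique->get(head);   /* sorted, unique rowids -> head->sort */
  delete unique;
  return result;
}
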
@@ -7339,9 +7383,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (doing_pk_scan)
DBUG_RETURN(pk_quick_select->get_next());
- result= read_record.read_record(&read_record);
-
- if (result == -1)
+ if ((result= read_record.read_record(&read_record)) == -1)
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
@@ -7349,7 +7391,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (pk_quick_select)
{
doing_pk_scan= TRUE;
- if ((result= pk_quick_select->init()) || (result= pk_quick_select->reset()))
+ if ((result= pk_quick_select->init()) ||
+ (result= pk_quick_select->reset()))
DBUG_RETURN(result);
DBUG_RETURN(pk_quick_select->get_next());
}
@@ -7391,9 +7434,10 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
quick= quick_it++;
if (cpk_quick)
{
- do {
+ do
+ {
error= quick->get_next();
- }while (!error && !cpk_quick->row_in_ranges());
+ } while (!error && !cpk_quick->row_in_ranges());
}
else
error= quick->get_next();
@@ -7413,7 +7457,8 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
quick= quick_it++;
}
- do {
+ do
+ {
if ((error= quick->get_next()))
DBUG_RETURN(error);
quick->file->position(quick->record);
@@ -7442,7 +7487,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
}
}
- /* We get here iff we got the same row ref in all scans. */
+ /* We get here if we got the same row ref in all scans. */
if (need_to_fetch_row)
error= head->file->rnd_pos(head->record[0], last_rowid);
DBUG_RETURN(error);
@@ -7501,7 +7546,7 @@ int QUICK_ROR_UNION_SELECT::get_next()
}
else
dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
- }while (dup_row);
+ } while (dup_row);
tmp= cur_rowid;
cur_rowid= prev_rowid;
@@ -7511,6 +7556,7 @@ int QUICK_ROR_UNION_SELECT::get_next()
DBUG_RETURN(error);
}
+
int QUICK_RANGE_SELECT::reset()
{
uint mrange_bufsiz;
@@ -7550,7 +7596,7 @@ int QUICK_RANGE_SELECT::reset()
}
/* Allocate the handler buffer if necessary. */
- if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
+ if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
{
mrange_bufsiz= min(multi_range_bufsiz,
(QUICK_SELECT_I::records + 1)* head->s->reclength);
@@ -7615,6 +7661,15 @@ int QUICK_RANGE_SELECT::get_next()
(cur_range >= (QUICK_RANGE**) ranges.buffer) &&
(cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements));
+ if (in_ror_merged_scan)
+ {
+ /*
+ We don't need to signal the bitmap change as the bitmap is always the
+ same for this head->file
+ */
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+ }
+
for (;;)
{
if (in_range)
@@ -7622,10 +7677,7 @@ int QUICK_RANGE_SELECT::get_next()
/* We did already start to read this key. */
result= file->read_multi_range_next(&mrange);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
}
uint count= min(multi_range_length, ranges.elements -
@@ -7634,6 +7686,8 @@ int QUICK_RANGE_SELECT::get_next()
{
/* Ranges have already been used up before. None is left for read. */
in_range= FALSE;
+ if (in_ror_merged_scan)
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
KEY_MULTI_RANGE *mrange_slot, *mrange_end;
@@ -7665,12 +7719,18 @@ int QUICK_RANGE_SELECT::get_next()
result= file->read_multi_range_first(&mrange, multi_range, count,
sorted, multi_range_buff);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
in_range= FALSE; /* No matching rows; go to next set of ranges. */
}
+
+end:
+ in_range= ! result;
+ if (in_ror_merged_scan)
+ {
+ /* Restore bitmaps set on entry */
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
+ }
+ DBUG_RETURN(result);
}
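
get_next() now funnels both the read_multi_range_next() and
read_multi_range_first() exits through a single end: label, so in_range and
the ROR bitmap restore stay consistent on every path. A simplified sketch of
that single-exit loop shape, assuming the 5.1 multi-range handler calls
('multi_range_next_row' and its single-batch simplification are hypothetical):

/* Sketch: single-exit loop over handler multi-range reads. */
static int multi_range_next_row(handler *file, KEY_MULTI_RANGE *ranges,
                                uint count, bool sorted, HANDLER_BUFFER *buf,
                                bool *in_range)
{
  KEY_MULTI_RANGE *mrange;
  int result;
  for (;;)
  {
    if (*in_range &&
        (result= file->read_multi_range_next(&mrange)) != HA_ERR_END_OF_FILE)
      goto end;                      /* row found, or a real error */
    if (count == 0)                  /* no ranges left to start */
    {
      *in_range= FALSE;
      return HA_ERR_END_OF_FILE;
    }
    result= file->read_multi_range_first(&mrange, ranges, count, sorted, buf);
    if (result != HA_ERR_END_OF_FILE)
      goto end;
    *in_range= FALSE;                /* batch matched nothing; try the next */
    count= 0;                        /* sketch: a single batch only */
  }
end:
  *in_range= !result;                /* 0 == success, still inside a range */
  return result;
}
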
@@ -7847,7 +7907,7 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
uint used_key_parts)
- : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
+ :QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
{
QUICK_RANGE *r;
@@ -8323,9 +8383,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
groups, and thus can be applied after the grouping.
GA4. There are no expressions among G_i, just direct column references.
NGA1.If in the index I there is a gap between the last GROUP attribute G_k,
- and the MIN/MAX attribute C, then NGA must consist of exactly the index
- attributes that constitute the gap. As a result there is a permutation
- of NGA that coincides with the gap in the index <B_1, ..., B_m>.
+ and the MIN/MAX attribute C, then NGA must consist of exactly the
+ index attributes that constitute the gap. As a result there is a
+ permutation of NGA that coincides with the gap in the index
+ <B_1, ..., B_m>.
NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of
equality conditions for all NG_i of the form (NG_i = const) or
(const = NG_i), such that each NG_i is referenced in exactly one
@@ -8333,9 +8394,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
gap in the index.
WA1. There are no other attributes in the WHERE clause except the ones
referenced in predicates RNG, PA, PC, EQ defined above. Therefore
- WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above
- tests. By transitivity then it also follows that each WA_i participates
- in the index I (if this was already tested for GA, NGA and C).
+ WA is subset of (GA union NGA union C) for GA,NGA,C that pass the
+ above tests. By transitivity then it also follows that each WA_i
+ participates in the index I (if this was already tested for GA, NGA
+ and C).
C) Overall query form:
SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)])
@@ -8397,12 +8459,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
TABLE *table= param->table;
bool have_min= FALSE; /* TRUE if there is a MIN function. */
bool have_max= FALSE; /* TRUE if there is a MAX function. */
- Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/
+ Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
KEY *index_info= NULL; /* The index chosen for data access. */
uint index= 0; /* The id of the chosen index. */
- uint group_key_parts= 0; /* Number of index key parts in the group prefix. */
+ uint group_key_parts= 0; // Number of index key parts in the group prefix.
uint used_key_parts= 0; /* Number of index key parts used for access. */
byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/
uint key_infix_len= 0; /* Length of key_infix. */
@@ -8520,28 +8582,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
we check that all query fields are indeed covered by 'cur_index'.
*/
if (pk < MAX_KEY && cur_index != pk &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
{
/* For each table field */
for (uint i= 0; i < table->s->fields; i++)
{
Field *cur_field= table->field[i];
/*
- If the field is used in the current query, check that the
- field is covered by some keypart of the current index.
+        If the field is used in the current query, ensure that it's
+        part of 'cur_index'.
*/
- if (thd->query_id == cur_field->query_id)
- {
- KEY_PART_INFO *key_part= cur_index_info->key_part;
- KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts;
- for (;;)
- {
- if (key_part->field == cur_field)
- break;
- if (++key_part == key_part_end)
- goto next_index; // Field was not part of key
- }
- }
+ if (bitmap_is_set(table->read_set, cur_field->field_index) &&
+ !cur_field->part_of_key_not_clustered.is_set(cur_index))
+ goto next_index; // Field was not part of key
}
}
@@ -8695,7 +8748,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
key_part_range[1]= last_part;
/* Check if cur_part is referenced in the WHERE clause. */
- if (join->conds->walk(&Item::find_item_in_field_list_processor,
+ if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
(byte*) key_part_range))
goto next_index;
}
@@ -8709,7 +8762,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
{
for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++)
{
- if (cur_part->field->query_id == thd->query_id)
+ if (bitmap_is_set(table->read_set, cur_part->field->field_index))
goto next_index;
}
}
@@ -9173,8 +9226,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
DBUG_ENTER("cost_group_min_max");
- table_records= table->file->records;
- keys_per_block= (table->file->block_size / 2 /
+ table_records= table->file->stats.records;
+ keys_per_block= (table->file->stats.block_size / 2 /
(index_info->key_length + table->file->ref_length)
+ 1);
num_blocks= (table_records / keys_per_block) + 1;
@@ -10347,6 +10400,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
const char *key_end= key+used_length;
String tmp(buff,sizeof(buff),&my_charset_bin);
uint store_length;
+ TABLE *table= key_part->field->table;
+ my_bitmap_map *old_write_set, *old_read_set;
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
for (; key < key_end; key+=store_length, key_part++)
{
@@ -10372,18 +10429,28 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
}
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
}
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
+ TABLE *table;
+ my_bitmap_map *old_read_map, *old_write_map;
DBUG_ENTER("print_quick");
if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
+ table= quick->head;
+ old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
quick->dbug_dump(0, TRUE);
+ dbug_tmp_restore_column_map(table->read_set, old_read_map);
+ dbug_tmp_restore_column_map(table->write_set, old_write_map);
+
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
DBUG_UNLOCK_FILE;
diff --git a/sql/opt_range.h b/sql/opt_range.h
index bc2496b0769..85cedf663cd 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -192,8 +192,9 @@ public:
function is called.
SYNOPSIS
init_ror_merged_scan()
- reuse_handler If true, the quick select may use table->handler, otherwise
- it must create and use a separate handler object.
+ reuse_handler If true, the quick select may use table->handler,
+ otherwise it must create and use a separate handler
+ object.
RETURN
0 Ok
other Error
@@ -259,7 +260,7 @@ class SEL_ARG;
class QUICK_RANGE_SELECT : public QUICK_SELECT_I
{
protected:
- bool next,dont_free;
+ bool next,dont_free,in_ror_merged_scan;
public:
int error;
protected:
@@ -277,8 +278,8 @@ protected:
freed by QUICK_RANGE_SELECT) */
HANDLER_BUFFER *multi_range_buff; /* the handler buffer (allocated and
freed by QUICK_RANGE_SELECT) */
+ MY_BITMAP column_bitmap, *save_read_set, *save_write_set;
-protected:
friend class TRP_ROR_INTERSECT;
friend
QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index d4e7745551a..ce3f5c5f108 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -55,6 +55,36 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond);
/*
+ Get exact count of rows in all tables
+
+ SYNOPSIS
+    get_exact_record_count()
+ tables List of tables
+
+ NOTES
+    When this is called, we know that all table handlers support HA_HAS_RECORDS
+ or HA_STATS_RECORDS_IS_EXACT
+
+ RETURN
+ ULONGLONG_MAX Error: Could not calculate number of rows
+    #              Product of the number of rows in all tables
+*/
+
+static ulonglong get_exact_record_count(TABLE_LIST *tables)
+{
+ ulonglong count= 1;
+ for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf)
+ {
+ ha_rows tmp= tl->table->file->records();
+    if (tmp == HA_POS_ERROR)
+ return ULONGLONG_MAX;
+ count*= tmp;
+ }
+ return count;
+}
+
+
+/*
Substitutes constants for some COUNT(), MIN() and MAX() functions.
SYNOPSIS
@@ -80,8 +110,8 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
List_iterator_fast<Item> it(all_fields);
int const_result= 1;
bool recalc_const_item= 0;
- longlong count= 1;
- bool is_exact_count= TRUE;
+ ulonglong count= 1;
+ bool is_exact_count= TRUE, maybe_exact_count= TRUE;
table_map removed_tables= 0, outer_tables= 0, used_tables= 0;
table_map where_tables= 0;
Item *item;
@@ -120,22 +150,25 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
used_tables|= tl->table->map;
/*
- If the storage manager of 'tl' gives exact row count, compute the total
- number of rows. If there are no outer table dependencies, this count
- may be used as the real count.
+ If the storage manager of 'tl' gives exact row count as part of
+ statistics (cheap), compute the total number of rows. If there are
+ no outer table dependencies, this count may be used as the real count.
Schema tables are filled after this function is invoked, so we can't
    get the row count.
*/
- if ((tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) ||
+ if (!(tl->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) ||
tl->schema_table)
{
+ maybe_exact_count&= test(!tl->schema_table &&
+ (tl->table->file->ha_table_flags() &
+ HA_HAS_RECORDS));
is_exact_count= FALSE;
count= 1; // ensure count != 0
}
else
{
tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
- count*= tl->table->file->records;
+ count*= tl->table->file->stats.records;
}
}
@@ -157,9 +190,19 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
there are no outer joins.
*/
if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null &&
- !outer_tables && is_exact_count)
+ !outer_tables && maybe_exact_count)
{
- ((Item_sum_count*) item)->make_const(count);
+ if (!is_exact_count)
+ {
+ if ((count= get_exact_record_count(tables)) == ULONGLONG_MAX)
+ {
+ /* Error from handler in counting rows. Don't optimize count() */
+ const_result= 0;
+ continue;
+ }
+ is_exact_count= 1; // count is now exact
+ }
+ ((Item_sum_count*) item)->make_const((longlong) count);
recalc_const_item= 1;
}
else
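
The rewritten COUNT(*) shortcut distinguishes two handler capabilities: if
every handler sets HA_STATS_RECORDS_IS_EXACT, the product can be taken from
the cheap stats.records values up front; if some handler only offers
HA_HAS_RECORDS, the potentially expensive handler::records() calls are
deferred until a COUNT(*) item is actually seen. A hedged sketch of that
decision, reusing get_exact_record_count() from this patch ('can_fold_count'
is a hypothetical helper; the real code also excludes schema tables, omitted
here):

/* Sketch: decide whether COUNT(*) over the join can be constant-folded. */
static bool can_fold_count(TABLE_LIST *tables, ulonglong *count)
{
  bool cheap= TRUE;        /* all handlers give exact stats.records */
  bool possible= TRUE;     /* all handlers at least implement records() */
  *count= 1;
  for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf)
  {
    ulonglong flags= tl->table->file->ha_table_flags();
    cheap&=    test(flags & HA_STATS_RECORDS_IS_EXACT);
    possible&= test(flags & HA_HAS_RECORDS);
    if (cheap)
    {
      tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
      *count*= tl->table->file->stats.records;
    }
  }
  if (!possible)
    return FALSE;
  if (!cheap && (*count= get_exact_record_count(tables)) == ULONGLONG_MAX)
    return FALSE;          /* a handler failed to count its rows */
  return TRUE;
}
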
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 650bd8fc58f..bb0891cdbbe 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -924,8 +924,19 @@ bool Protocol_simple::store(Field *field)
char buff[MAX_FIELD_WIDTH];
String str(buff,sizeof(buff), &my_charset_bin);
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
+ TABLE *table= field->table;
+#ifndef DBUG_OFF
+ my_bitmap_map *old_map= 0;
+ if (table->file)
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+#endif
field->val_str(&str);
+#ifndef DBUG_OFF
+ if (old_map)
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+#endif
+
return store_string_aux(str.ptr(), str.length(), str.charset(), tocs);
}
diff --git a/sql/records.cc b/sql/records.cc
index 5cb9b1e5c47..b2505600b22 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -64,10 +64,7 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
table->status=0; /* And it's always found */
if (!table->file->inited)
- {
table->file->ha_index_init(idx, 1);
- table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
- }
/* read_record will be changed to rr_index in rr_index_first */
info->read_record= rr_index_first;
}
@@ -195,11 +192,11 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
if (!table->sort.addon_field &&
! (specialflag & SPECIAL_SAFE_MODE) &&
thd->variables.read_rnd_buff_size &&
- !(table->file->table_flags() & HA_FAST_KEY_READ) &&
+ !(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
(table->db_stat & HA_READ_ONLY ||
table->reginfo.lock_type <= TL_READ_NO_INSERT) &&
- (ulonglong) table->s->reclength* (table->file->records+
- table->file->deleted) >
+ (ulonglong) table->s->reclength* (table->file->stats.records+
+ table->file->stats.deleted) >
(ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE &&
info->io_cache->end_of_file/info->ref_length * table->s->reclength >
(my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE &&
@@ -239,7 +236,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
(int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY ||
!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD) ||
(use_record_cache < 0 &&
- !(table->file->table_flags() & HA_NOT_DELETE_WITH_CACHE))))
+ !(table->file->ha_table_flags() & HA_NOT_DELETE_WITH_CACHE))))
VOID(table->file->extra_opt(HA_EXTRA_CACHE,
thd->variables.read_buff_size));
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index ae45b299196..179097fd958 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -2696,7 +2696,7 @@ bool sys_var_max_user_conn::check(THD *thd, set_var *var)
{
/*
Per-session values of max_user_connections can't be set directly.
- QQ: May be we should have a separate error message for this?
+    Maybe we should have a separate error message for this?
*/
my_error(ER_GLOBAL_VARIABLE, MYF(0), name);
return TRUE;
@@ -2763,7 +2763,8 @@ static bool set_option_autocommit(THD *thd, set_var *var)
if ((org_options & OPTION_NOT_AUTOCOMMIT))
{
/* We changed to auto_commit mode */
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
if (ha_commit(thd))
return 1;
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index a2bc77714bb..51e765f9ba1 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -2865,30 +2865,8 @@ ER_WRONG_OUTER_JOIN 42000
swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket"
ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON"
ER_NULL_COLUMN_IN_INDEX 42000
- cze "Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL"
- dan "Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL"
- nla "Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL"
- eng "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- jps "Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.",
- est "Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL"
- fre "La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL"
- ger "Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert"
- greek "Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL"
- hun "A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL"
- ita "La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL"
- jpn "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó."
- kor "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä..."
- nor "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- norwegian-ny "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- pol "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- por "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)"
- rum "Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL"
- rus "óÔÏÌÂÅÃ '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL"
- serbian "Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'"
- slo "Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL"
- spa "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL"
- swe "Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL"
+ eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler"
+ swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.64s' till NOT NULL eller använd en annan hanterare"
ER_CANT_FIND_UDF
cze "Nemohu na-Bèíst funkci '%-.64s'"
dan "Kan ikke læse funktionen '%-.64s'"
diff --git a/sql/sp.cc b/sql/sp.cc
index 6f074fd7dce..2dd38429c1f 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -137,6 +137,7 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup)
mysql_proc_table_exists= 0;
DBUG_RETURN(0);
}
+ table->use_all_columns();
DBUG_ASSERT(table->s->system_table);
@@ -182,6 +183,8 @@ static TABLE *open_proc_table_for_update(THD *thd)
tables.lock_type= TL_WRITE;
table= open_ltable(thd, &tables, TL_WRITE);
+ if (table)
+ table->use_all_columns();
/*
Under explicit LOCK TABLES or in prelocked mode we should not
@@ -803,6 +806,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
TABLE_LIST *leaves= 0;
st_used_field used_fields[array_elements(init_fields)];
+ table->use_all_columns();
memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields));
/* Init header */
for (used_field= &used_fields[0];
@@ -836,7 +840,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
thd->lex->select_lex.context.resolve_in_table_list_only(&tables);
setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- &tables, 0, &leaves, FALSE);
+ &tables, &leaves, FALSE);
for (used_field= &used_fields[0];
used_field->field_name;
used_field++)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 8a64799e5f9..54c4ecb789f 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -3443,7 +3443,7 @@ sp_add_to_query_tables(THD *thd, LEX *lex,
table->table_name= thd->strmake(name, table->table_name_length);
table->alias= thd->strdup(name);
table->lock_type= locktype;
- table->select_lex= lex->current_select; // QQ?
+ table->select_lex= lex->current_select;
table->cacheable_table= 1;
lex->add_to_query_tables(table);
diff --git a/sql/spatial.cc b/sql/spatial.cc
index e91653f79d5..bcc92e75435 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -826,7 +826,6 @@ int Gis_polygon::area(double *ar, const char **end_of_data) const
double x, y;
get_point(&x, &y, data);
data+= (SIZEOF_STORED_DOUBLE*2);
- /* QQ: Is the following prev_x+x right ? */
lr_area+= (prev_x + x)* (prev_y - y);
prev_x= x;
prev_y= y;
@@ -949,7 +948,6 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
double x, y;
get_point(&x, &y, data);
data+= (SIZEOF_STORED_DOUBLE*2);
- /* QQ: Is the following prev_x+x right ? */
cur_area+= (prev_x + x) * (prev_y - y);
cur_cx+= x;
cur_cy+= y;
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 09b684c8706..a35c69b668f 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -323,6 +323,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
while (!(read_record_info.read_record(&read_record_info)))
{
@@ -369,6 +370,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_hosts);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
password_length= table->field[2]->field_length /
table->field[2]->charset()->mbmaxlen;
@@ -555,6 +557,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_users);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
while (!(read_record_info.read_record(&read_record_info)))
{
@@ -1785,14 +1788,15 @@ static bool update_user_table(THD *thd, TABLE *table,
DBUG_ENTER("update_user_table");
DBUG_PRINT("enter",("user: %s host: %s",user,host));
+ table->use_all_columns();
table->field[0]->store(host,(uint) strlen(host), system_charset_info);
table->field[1]->store(user,(uint) strlen(user), system_charset_info);
key_copy((byte *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
- (byte *) user_key, table->key_info->key_length,
+ (byte *) user_key,
+ table->key_info->key_length,
HA_READ_KEY_EXACT))
{
my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH),
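
All of the grant-table updates in this file now share one lookup shape:
use_all_columns() on the privilege table, store the key columns into
record[0], copy the key image out with key_copy(), and probe with
index_read_idx(..., HA_READ_KEY_EXACT). A minimal sketch of the probe
('find_acl_row' is a hypothetical name; error handling trimmed):

/* Sketch: exact-key probe into a privilege table keyed on (host, user). */
static bool find_acl_row(TABLE *table, const char *host, const char *user,
                         byte *user_key)
{
  table->use_all_columns();
  table->field[0]->store(host, (uint) strlen(host), system_charset_info);
  table->field[1]->store(user, (uint) strlen(user), system_charset_info);
  key_copy(user_key, table->record[0], table->key_info,
           table->key_info->key_length);
  return table->file->index_read_idx(table->record[0], 0, user_key,
                                     table->key_info->key_length,
                                     HA_READ_KEY_EXACT) == 0;
}
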
@@ -1875,12 +1879,14 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
password=combo.password.str;
}
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
- table->field[1]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->use_all_columns();
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
+ table->field[1]->store(combo.user.str,combo.user.length,
+ system_charset_info);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2016,7 +2022,6 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
    We should NEVER delete from the user table, as a user can still
use mysqld even if he doesn't have any privileges in the user table!
*/
- table->file->ha_retrieve_all_cols();
if (cmp_record(table,record[1]) &&
(error=table->file->ha_update_row(table->record[1],table->record[0])))
{ // This should never happen
@@ -2092,13 +2097,15 @@ static int replace_db_table(TABLE *table, const char *db,
DBUG_RETURN(-1);
}
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->use_all_columns();
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0],0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2110,9 +2117,11 @@ static int replace_db_table(TABLE *table, const char *db,
}
old_row_exists = 0;
restore_record(table, s->default_values);
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
}
else
{
@@ -2134,18 +2143,17 @@ static int replace_db_table(TABLE *table, const char *db,
/* update old existing row */
if (rights)
{
- table->file->ha_retrieve_all_cols();
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error= table->file->ha_update_row(table->record[1],
+ table->record[0])))
goto table_error; /* purecov: deadcode */
}
else /* must have been a revoke of all privileges */
{
- if ((error = table->file->ha_delete_row(table->record[1])))
+ if ((error= table->file->ha_delete_row(table->record[1])))
goto table_error; /* purecov: deadcode */
}
}
- else if (rights && (error=table->file->ha_write_row(table->record[0])))
+ else if (rights && (error= table->file->ha_write_row(table->record[0])))
{
if (error && error != HA_ERR_FOUND_DUPP_KEY) /* purecov: inspected */
goto table_error; /* purecov: deadcode */
@@ -2301,7 +2309,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
uint key_prefix_len;
KEY_PART_INFO *key_part= col_privs->key_info->key_part;
col_privs->field[0]->store(host.hostname,
- host.hostname ? (uint) strlen(host.hostname) : 0,
+ host.hostname ? (uint) strlen(host.hostname) :
+ 0,
system_charset_info);
col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info);
col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info);
@@ -2442,6 +2451,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
KEY_PART_INFO *key_part= table->key_info->key_part;
DBUG_ENTER("replace_column_table");
+ table->use_all_columns();
table->field[0]->store(combo.host.str,combo.host.length,
system_charset_info);
table->field[1]->store(db,(uint) strlen(db),
@@ -2477,7 +2487,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read(table->record[0], user_key,
table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2555,7 +2564,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
key_prefix_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read(table->record[0], user_key,
key_prefix_length,
HA_READ_KEY_EXACT))
@@ -2645,16 +2653,19 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
DBUG_RETURN(-1); /* purecov: deadcode */
}
+ table->use_all_columns();
restore_record(table, s->default_values); // Get empty record
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
- table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
+ table->field[3]->store(table_name,(uint) strlen(table_name),
+ system_charset_info);
store_record(table,record[1]); // store at pos 1
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2767,6 +2778,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
DBUG_RETURN(-1);
}
+ table->use_all_columns();
restore_record(table, s->default_values); // Get empty record
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1);
@@ -3464,10 +3476,14 @@ static my_bool grant_load(TABLE_LIST *tables)
0,0);
init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0);
- t_table = tables[0].table; c_table = tables[1].table;
+ t_table = tables[0].table;
+ c_table = tables[1].table;
p_table= tables[2].table;
t_table->file->ha_index_init(0, 1);
p_table->file->ha_index_init(0, 1);
+ t_table->use_all_columns();
+ c_table->use_all_columns();
+ p_table->use_all_columns();
if (!t_table->file->index_first(t_table->record[0]))
{
memex_ptr= &memex;
@@ -3475,7 +3491,7 @@ static my_bool grant_load(TABLE_LIST *tables)
do
{
GRANT_TABLE *mem_check;
- if (!(mem_check=new GRANT_TABLE(t_table,c_table)))
+ if (!(mem_check=new (memex_ptr) GRANT_TABLE(t_table,c_table)))
{
        /* This could only happen if we are out of memory */
grant_option= FALSE;
@@ -3513,7 +3529,7 @@ static my_bool grant_load(TABLE_LIST *tables)
{
GRANT_NAME *mem_check;
HASH *hash;
- if (!(mem_check=new GRANT_NAME(p_table)))
+ if (!(mem_check=new (&memex) GRANT_NAME(p_table)))
{
        /* This could only happen if we are out of memory */
grant_option= FALSE;
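
The switch to new (memex_ptr) GRANT_TABLE(...) allocates the grant cache
objects on a MEM_ROOT, so they are released wholesale by free_root() instead
of by individual delete calls; it works because the grant classes inherit
Sql_alloc, which defines operator new(size_t, MEM_ROOT*). A toy sketch of the
idiom ('Grant_like' and 'make_in_arena' are hypothetical names):

/* Sketch: arena allocation via Sql_alloc's placement operator new. */
class Grant_like : public Sql_alloc
{
public:
  ulong privs;
  Grant_like() : privs(0) {}
};

static Grant_like *make_in_arena(MEM_ROOT *memex_ptr)
{
  /* memory comes from the MEM_ROOT; freed by free_root(), never by delete */
  return new (memex_ptr) Grant_like();
}
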
@@ -4891,6 +4907,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
uint key_prefix_length;
DBUG_ENTER("handle_grant_table");
+ table->use_all_columns();
if (! table_no) // mysql.user table
{
/*
@@ -5538,7 +5555,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (!strcmp(lex_user->user.str,user) &&
!my_strcasecmp(system_charset_info, lex_user->host.str, host))
{
- if (!replace_db_table(tables[1].table, acl_db->db, *lex_user, ~(ulong)0, 1))
+ if (!replace_db_table(tables[1].table, acl_db->db, *lex_user,
+ ~(ulong)0, 1))
{
/*
Don't increment counter as replace_db_table deleted the
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 77d2b165881..aa571497683 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -947,8 +947,13 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
{
for (; table ; table= table->next)
+ {
if (table->query_id == thd->query_id)
+ {
table->query_id= 0;
+ table->file->ha_reset();
+ }
+ }
}
@@ -1028,21 +1033,13 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
*/
ha_commit_stmt(thd);
- /* We are under simple LOCK TABLES so should not do anything else. */
- if (!prelocked_mode)
- DBUG_VOID_RETURN;
+ /* Ensure we are calling ha_reset() for all used tables */
+ mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
- if (!thd->lex->requires_prelocking())
- {
- /*
- If we are executing one of substatements we have to mark
- all tables which it used as free for reuse.
- */
- mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
+ /* We are under simple LOCK TABLES so should not do anything else. */
+ if (!prelocked_mode || !thd->lex->requires_prelocking())
DBUG_VOID_RETURN;
- }
- DBUG_ASSERT(prelocked_mode);
/*
We are in prelocked mode, so we have to leave it now with doing
implicit UNLOCK TABLES if need.
@@ -1094,7 +1091,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
found_old_table= 0;
while (thd->open_tables)
- found_old_table|=close_thread_table(thd, &thd->open_tables);
+ found_old_table|= close_thread_table(thd, &thd->open_tables);
thd->some_tables_deleted=0;
/* Free tables to hold down open files */
@@ -1123,6 +1120,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
DBUG_VOID_RETURN;
}
+
/* move one table to free list */
bool close_thread_table(THD *thd, TABLE **table_ptr)
@@ -1147,11 +1145,8 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
table->s->flush_version= flush_version;
table->file->extra(HA_EXTRA_FLUSH);
}
- else
- {
- // Free memory and reset for next loop
- table->file->ha_reset();
- }
+ // Free memory and reset for next loop
+ table->file->ha_reset();
table->in_use=0;
if (unused_tables)
{
@@ -1181,10 +1176,8 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
void close_temporary_tables(THD *thd)
{
- TABLE *next,
- *prev_table /* prev link is not maintained in TABLE's double-linked list */,
- *table;
- char *query= (gptr) 0, *end;
+ TABLE *next, *prev_table, *table;
+ char *query= 0, *end;
uint query_buf_size, max_names_len;
bool found_user_tables;
@@ -2085,6 +2078,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (table->timestamp_field)
table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
+ table->clear_column_bitmaps();
DBUG_ASSERT(table->key_read == 0);
DBUG_RETURN(table);
}
@@ -2182,6 +2176,7 @@ static bool reopen_table(TABLE *table)
VOID(closefrm(table, 1)); // close file, free everything
*table= tmp;
+ table->default_column_bitmaps();
table->file->change_table_ptr(table, table->s);
DBUG_ASSERT(table->alias != 0);
@@ -3549,22 +3544,50 @@ Field *view_ref_found= (Field*) 0x2;
static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
{
- if (thd->set_query_id)
+ DBUG_ENTER("update_field_dependencies");
+ if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
- table->file->ha_set_bit_in_rw_set(field->fieldnr,
- (bool)(thd->set_query_id-1));
- if (field->query_id != thd->query_id)
+ MY_BITMAP *current_bitmap, *other_bitmap;
+
+ /*
+ We always want to register the used keys, as the column bitmap may have
+ been set for all fields (for example for view).
+ */
+
+ table->used_keys.intersect(field->part_of_key);
+ table->merge_keys.merge(field->part_of_key);
+
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
{
- if (table->get_fields_in_item_tree)
- field->flags|= GET_FIXED_FIELDS_FLAG;
- field->query_id= thd->query_id;
- table->used_fields++;
- table->used_keys.intersect(field->part_of_key);
+ current_bitmap= table->read_set;
+ other_bitmap= table->write_set;
}
else
- thd->dupp_field= field;
- } else if (table->get_fields_in_item_tree)
+ {
+ current_bitmap= table->write_set;
+ other_bitmap= table->read_set;
+ }
+
+ if (bitmap_fast_test_and_set(current_bitmap, field->field_index))
+ {
+ if (thd->mark_used_columns == MARK_COLUMNS_WRITE)
+ {
+ DBUG_PRINT("warning", ("Found duplicated field"));
+ thd->dup_field= field;
+ }
+ else
+ {
+ DBUG_PRINT("note", ("Field found before"));
+ }
+ DBUG_VOID_RETURN;
+ }
+ if (table->get_fields_in_item_tree)
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ table->used_fields++;
+ }
+ else if (table->get_fields_in_item_tree)
field->flags|= GET_FIXED_FIELDS_FLAG;
+ DBUG_VOID_RETURN;
}
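
The rewritten update_field_dependencies() above swaps per-field query_id
stamping for read/write column bitmaps, with bitmap_fast_test_and_set()
doubling as duplicate-field detection. A minimal standalone sketch of that
idiom, using std::bitset and illustrative names rather than the server's
MY_BITMAP API:

    // Sketch (not the server's API): a test-and-set bitmap detects
    // duplicate column references the way update_field_dependencies()
    // now does with bitmap_fast_test_and_set().
    #include <bitset>
    #include <cstdio>

    enum MarkColumns { COLS_NONE, COLS_READ, COLS_WRITE };

    struct TableSketch {
      std::bitset<64> read_set, write_set;   // one bit per column
    };

    // Returns true if the column was already marked (a duplicate).
    static bool mark_column(TableSketch &t, MarkColumns mode, unsigned idx)
    {
      std::bitset<64> &current= (mode == COLS_READ) ? t.read_set
                                                    : t.write_set;
      bool was_set= current.test(idx);
      current.set(idx);                      // test-and-set in one pass
      return was_set;
    }

    int main()
    {
      TableSketch t;
      mark_column(t, COLS_WRITE, 3);
      if (mark_column(t, COLS_WRITE, 3))     // second mention of column 3
        std::puts("duplicate field in list"); // analogous to thd->dup_field
      return 0;
    }
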
@@ -3973,12 +3996,12 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
fld= WRONG_GRANT;
else
#endif
- if (thd->set_query_id)
+ if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
/*
- * get rw_set correct for this field so that the handler
- * knows that this field is involved in the query and gets
- * retrieved/updated
+ Get rw_set correct for this field so that the handler
+ knows that this field is involved in the query and gets
+ retrieved/updated
*/
Field *field_to_set= NULL;
if (fld == view_ref_found)
@@ -3986,13 +4009,22 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
Item *it= (*ref)->real_item();
if (it->type() == Item::FIELD_ITEM)
field_to_set= ((Item_field*)it)->field;
+ else
+ {
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ it->walk(&Item::register_field_in_read_map, 1, (byte *) 0);
+ }
}
else
field_to_set= fld;
if (field_to_set)
- field_to_set->table->file->
- ha_set_bit_in_rw_set(field_to_set->fieldnr,
- (bool)(thd->set_query_id-1));
+ {
+ TABLE *table= field_to_set->table;
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ bitmap_set_bit(table->read_set, field_to_set->field_index);
+ else
+ bitmap_set_bit(table->write_set, field_to_set->field_index);
+ }
}
}
DBUG_RETURN(fld);
@@ -4685,17 +4717,17 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
{
TABLE *table_1= nj_col_1->table_ref->table;
/* Mark field_1 used for table cache. */
- field_1->query_id= thd->query_id;
- table_1->file->ha_set_bit_in_read_set(field_1->fieldnr);
+ bitmap_set_bit(table_1->read_set, field_1->field_index);
table_1->used_keys.intersect(field_1->part_of_key);
+ table_1->merge_keys.merge(field_1->part_of_key);
}
if (field_2)
{
TABLE *table_2= nj_col_2->table_ref->table;
/* Mark field_2 used for table cache. */
- field_2->query_id= thd->query_id;
- table_2->file->ha_set_bit_in_read_set(field_2->fieldnr);
+ bitmap_set_bit(table_2->read_set, field_2->field_index);
table_2->used_keys.intersect(field_2->part_of_key);
+ table_2->merge_keys.merge(field_2->part_of_key);
}
if (using_fields != NULL)
@@ -5163,17 +5195,17 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
****************************************************************************/
bool setup_fields(THD *thd, Item **ref_pointer_array,
- List<Item> &fields, ulong set_query_id,
+ List<Item> &fields, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func)
{
reg2 Item *item;
- ulong save_set_query_id= thd->set_query_id;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
List_iterator<Item> it(fields);
DBUG_ENTER("setup_fields");
- thd->set_query_id=set_query_id;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
if (allow_sum_func)
thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
thd->where= THD::DEFAULT_WHERE;
@@ -5199,8 +5231,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
(item= *(it.ref()))->check_cols(1))
{
thd->lex->allow_sum_func= save_allow_sum_func;
- thd->set_query_id= save_set_query_id;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(TRUE); /* purecov: inspected */
}
if (ref)
@@ -5211,8 +5243,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
thd->used_tables|= item->used_tables();
}
thd->lex->allow_sum_func= save_allow_sum_func;
- thd->set_query_id= save_set_query_id;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(test(thd->net.report_error));
}
@@ -5256,7 +5288,6 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
context name resolution context to set up the table list there
from_clause Top-level list of table references in the FROM clause
tables Table list (select_lex->table_list)
- conds Condition of current SELECT (can be changed by VIEW)
leaves List of join table leaves list (select_lex->leaf_tables)
refresh It is only a refresh for a subquery
select_insert It is SELECT ... INSERT command
@@ -5278,7 +5309,7 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
- Item **conds, TABLE_LIST **leaves, bool select_insert)
+ TABLE_LIST **leaves, bool select_insert)
{
uint tablenr= 0;
DBUG_ENTER("setup_tables");
@@ -5310,6 +5341,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
}
setup_table_map(table, table_list, tablenr);
table->used_keys= table->s->keys_for_keyread;
+ table->merge_keys.clear_all();
if (table_list->use_index)
{
key_map map;
@@ -5483,7 +5515,6 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
}
#endif
-
/*
Update the tables used in the query based on the referenced fields. For
views and natural joins this update is performed inside the loop below.
@@ -5549,18 +5580,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
if ((field= field_iterator.field()))
{
- /*
- Mark if field used before in this select.
- Used by 'insert' to verify if a field name is used twice.
- */
- if (field->query_id == thd->query_id)
- thd->dupp_field= field;
- field->query_id= thd->query_id;
- field->table->file->ha_set_bit_in_read_set(field->fieldnr);
-
+ /* Mark fields as used to allow storage engine to optimize access */
+ bitmap_set_bit(field->table->read_set, field->field_index);
if (table)
+ {
table->used_keys.intersect(field->part_of_key);
-
+ table->merge_keys.merge(field->part_of_key);
+ }
if (tables->is_natural_join)
{
TABLE *field_table;
@@ -5577,16 +5603,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
{
thd->used_tables|= field_table->map;
field_table->used_keys.intersect(field->part_of_key);
+ field_table->merge_keys.merge(field->part_of_key);
field_table->used_fields++;
}
}
}
else
- {
thd->used_tables|= item->used_tables();
- item->walk(&Item::reset_query_id_processor,
- (byte *)(&thd->query_id));
- }
}
/*
In case of stored tables, all fields are considered as used,
@@ -5595,10 +5618,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
For NATURAL joins, used_tables is updated in the IF above.
*/
if (table)
- {
table->used_fields= table->s->fields;
- table->file->ha_set_all_bits_in_read_set();
- }
}
if (found)
DBUG_RETURN(FALSE);
@@ -5657,8 +5677,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
arena->is_conventional())
arena= 0; // For easier test
- thd->set_query_id=1;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= MARK_COLUMNS_READ;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
select_lex->cond_count= 0;
for (table= tables; table; table= table->next_local)
@@ -5913,7 +5933,7 @@ static void mysql_rm_tmp_tables(void)
if (!bcmp(file->name,tmp_file_prefix,tmp_file_prefix_length))
{
- sprintf(filePath,"%s%s",tmpdir,file->name);
+ sprintf(filePath,"%s%c%s",tmpdir,FN_LIBCHAR,file->name);
VOID(my_delete(filePath,MYF(MY_WME)));
}
}
@@ -6159,7 +6179,7 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
alias alias for table
db database
table_name name of table
- db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
+ db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
can be 0 (example in ha_example_table)
prgflag READ_ALL etc..
ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc..
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index 35c501ede56..3a7fa4b661a 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -66,6 +66,7 @@ public:
my_bool is_clear_all() const { return bitmap_is_clear_all(&map); }
my_bool is_set_all() const { return bitmap_is_set_all(&map); }
my_bool is_subset(const Bitmap& map2) const { return bitmap_is_subset(&map, &map2.map); }
+ my_bool is_overlapping(const Bitmap& map2) const { return bitmap_is_overlapping(&map, &map2.map); }
my_bool operator==(const Bitmap& map2) const { return bitmap_cmp(&map, &map2.map); }
char *print(char *buf) const
{
@@ -132,6 +133,7 @@ public:
my_bool is_clear_all() const { return map == (ulonglong)0; }
my_bool is_set_all() const { return map == ~(ulonglong)0; }
my_bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); }
+ my_bool is_overlapping(const Bitmap<64>& map2) const { return (map & map2.map) != 0; }
my_bool operator==(const Bitmap<64>& map2) const { return map == map2.map; }
char *print(char *buf) const { longlong2str(map,buf,16); return buf; }
ulonglong to_ulonglong() const { return map; }
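
The new is_overlapping() is a plain bitwise-AND test: two key/column sets
overlap iff their intersection is non-empty. A self-contained sketch of the
fixed-width semantics (plain C++, not the Bitmap template itself):

    // Two bit sets overlap iff (a & b) != 0, as in the 64-bit
    // specialization above.
    #include <cassert>

    static bool is_overlapping(unsigned long long a, unsigned long long b)
    { return (a & b) != 0; }

    int main()
    {
      unsigned long long used_keys= 0x6;   // keys 1 and 2
      unsigned long long changed=   0x4;   // key 2 touched by an UPDATE
      assert(is_overlapping(used_keys, changed));
      assert(!is_overlapping(used_keys, 0x8));   // key 3: disjoint
      return 0;
    }
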
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 6166771e5d1..581f47ac7f7 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -240,6 +240,7 @@ THD::THD()
bzero(ha_data, sizeof(ha_data));
mysys_var=0;
binlog_evt_union.do_union= FALSE;
+ enable_slow_log= 0;
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
#endif
@@ -1632,7 +1633,7 @@ Statement::Statement(enum enum_state state_arg, ulong id_arg,
ulong alloc_block_size, ulong prealloc_size)
:Query_arena(&main_mem_root, state_arg),
id(id_arg),
- set_query_id(1),
+ mark_used_columns(MARK_COLUMNS_READ),
lex(&main_lex),
query(0),
query_length(0),
@@ -1652,7 +1653,7 @@ Query_arena::Type Statement::type() const
void Statement::set_statement(Statement *stmt)
{
id= stmt->id;
- set_query_id= stmt->set_query_id;
+ mark_used_columns= stmt->mark_used_columns;
lex= stmt->lex;
query= stmt->query;
query_length= stmt->query_length;
@@ -2428,6 +2429,7 @@ field_type_name(enum_field_types type)
return "Unknown";
}
+
my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
{
my_size_t length= 0;
@@ -2444,53 +2446,52 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
return length;
}
+
my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data,
const byte *record) const
{
- Field **p_field= table->field, *field= *p_field;
+ Field **p_field= table->field, *field;
int n_null_bytes= table->s->null_bytes;
- my_ptrdiff_t const offset= record - (byte*) table->record[0];
-
+ byte *ptr;
+ uint i;
+ my_ptrdiff_t const offset= (my_ptrdiff_t) (record - (byte*)
+ table->record[0]);
memcpy(row_data, record, n_null_bytes);
- byte *ptr= row_data+n_null_bytes;
+ ptr= row_data+n_null_bytes;
- for (int i= 0 ; field ; i++, p_field++, field= *p_field)
+ for (i= 0 ; (field= *p_field) ; i++, p_field++)
{
if (bitmap_is_set(cols,i))
ptr= (byte*)field->pack((char *) ptr, field->ptr + offset);
}
-
- /*
- my_ptrdiff_t is signed, size_t is unsigned. Assert that the
- conversion will work correctly.
- */
- DBUG_ASSERT(ptr - row_data >= 0);
- return (static_cast<size_t>(ptr - row_data));
+ return (static_cast<my_size_t>(ptr - row_data));
}
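
pack_row() copies the null-byte prefix verbatim and then appends only the
columns whose bit is set in cols. A standalone sketch of that loop shape,
with fixed-width integers standing in for Field::pack() (the real row
format differs):

    // Sketch: copy the null-byte prefix, then pack marked columns only.
    #include <bitset>
    #include <cstring>
    #include <cstdio>

    static size_t pack_row_sketch(const std::bitset<8> &cols,
                                  const unsigned *record, size_t n_fields,
                                  unsigned char null_byte,
                                  unsigned char *out)
    {
      unsigned char *ptr= out;
      *ptr++= null_byte;                       // null-bit prefix first
      for (size_t i= 0; i < n_fields; i++)
        if (cols.test(i))                      // pack marked columns only
        {
          std::memcpy(ptr, &record[i], sizeof(unsigned));
          ptr+= sizeof(unsigned);
        }
      return (size_t)(ptr - out);              // bytes written
    }

    int main()
    {
      unsigned record[4]= {10, 20, 30, 40};
      unsigned char buf[64];
      std::bitset<8> cols;                     // pack columns 0 and 2 only
      cols.set(0); cols.set(2);
      std::printf("%zu bytes\n",
                  pack_row_sketch(cols, record, 4, 0x00, buf));
      return 0;
    }
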
+
int THD::binlog_write_row(TABLE* table, bool is_trans,
MY_BITMAP const* cols, my_size_t colcnt,
byte const *record)
{
DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
- /*
- Pack records into format for transfer. We are allocating more
- memory than needed, but that doesn't matter.
+ /*
+ Pack records into format for transfer. We are allocating more
+ memory than needed, but that doesn't matter.
*/
bool error= 0;
byte *row_data= table->write_row_record;
my_size_t const max_len= max_row_length(table, record);
-
- /*
- * Allocate room for a row (if needed)
- */
+ my_size_t len;
+ Rows_log_event *ev;
+
+ /* Allocate room for a row (if needed) */
if (!row_data)
{
if (!table->s->blob_fields)
{
/* multiply max_len by 2 so it can be used for update_row as well */
- table->write_row_record= (byte *) alloc_root(&table->mem_root, 2*max_len);
+ table->write_row_record= (byte *) alloc_root(&table->mem_root,
+ 2*max_len);
if (!table->write_row_record)
return HA_ERR_OUT_OF_MEM;
row_data= table->write_row_record;
@@ -2498,12 +2499,11 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
else if (unlikely(!(row_data= (byte *) my_malloc(max_len, MYF(MY_WME)))))
return HA_ERR_OUT_OF_MEM;
}
- my_size_t const len= pack_row(table, cols, row_data, record);
+ len= pack_row(table, cols, row_data, record);
- Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
- len, is_trans,
- static_cast<Write_rows_log_event*>(0));
+ ev= binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ len, is_trans,
+ static_cast<Write_rows_log_event*>(0));
/* add_row_data copies row_data to internal buffer */
error= likely(ev != 0) ? ev->add_row_data(row_data,len) : HA_ERR_OUT_OF_MEM ;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 54f256997d0..510dbbbb590 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -37,9 +37,10 @@ enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
-
-enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN,
- CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_check_fields
+{ CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_mark_columns
+{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
extern char internal_table_name[2];
extern const char **errmesg;
@@ -465,17 +466,17 @@ public:
ulong id;
/*
- - if set_query_id=1, we set field->query_id for all fields. In that case
- field list can not contain duplicates.
- 0: Means query_id is not set and no indicator to handler of fields used
- is set
- 1: Means query_id is set for fields in list and bit in read set is set
- to inform handler of that field is to be read
- 2: Means query is set for fields in list and bit is set in update set
- to inform handler that it needs to update this field in write_row
- and update_row
+ MARK_COLUMNS_NONE: Means mark_used_columns is not set and no indicator
+ of used fields is passed to the handler
+ MARK_COLUMNS_READ: Means a bit in read set is set to inform handler
+ that the field is to be read. If field list contains
+ duplicates, then thd->dup_field is set to point
+ to the last found duplicate.
+ MARK_COLUMNS_WRITE: Means a bit is set in write set to inform handler
+ that it needs to update this field in write_row
+ and update_row.
*/
- ulong set_query_id;
+ enum enum_mark_columns mark_used_columns;
LEX_STRING name; /* name for named prepared statements */
LEX *lex; // parse tree descriptor
@@ -1012,7 +1013,7 @@ public:
#endif
}
} transaction;
- Field *dupp_field;
+ Field *dup_field;
#ifndef __WIN__
sigset_t signals,block_signals;
#endif
@@ -1391,7 +1392,8 @@ public:
}
inline void reset_current_stmt_binlog_row_based()
{
- current_stmt_binlog_row_based= test(variables.binlog_format == BINLOG_FORMAT_ROW);
+ current_stmt_binlog_row_based= test(variables.binlog_format ==
+ BINLOG_FORMAT_ROW);
}
};
@@ -1557,6 +1559,7 @@ class select_insert :public select_result_interceptor {
int prepare2(void);
bool send_data(List<Item> &items);
virtual void store_values(List<Item> &values);
+ virtual bool can_rollback_data() { return 0; }
void send_error(uint errcode,const char *err);
bool send_eof();
/* not implemented: select_insert is never re-used in prepared statements */
@@ -1578,17 +1581,19 @@ public:
List<create_field> &fields_par,
List<Key> &keys_par,
List<Item> &select_fields,enum_duplicates duplic, bool ignore)
- :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table),
- extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par),
+ :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
+ create_table(table), extra_fields(&fields_par),keys(&keys_par),
+ create_info(create_info_par),
lock(0)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
-
void binlog_show_create_table(TABLE **tables, uint count);
void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
+ virtual bool can_rollback_data() { return 1; }
+
// Needed for access from local class MY_HOOKS in prepare(), since thd is protected.
THD *get_thd(void) { return thd; }
};
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 44b0fe1a2f1..65434c10ac6 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -80,9 +80,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
!(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) &&
!(table->triggers && table->triggers->has_delete_triggers()))
{
- /* Update the table->file->records number */
+ /* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
- ha_rows const maybe_deleted= table->file->records;
+ ha_rows const maybe_deleted= table->file->stats.records;
/*
If all rows shall be deleted, we (almost) always log this
statement-based (see [binlog], below), so we set this flag and
@@ -113,7 +113,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
DBUG_RETURN(0);
}
#endif
- /* Update the table->file->records number */
+ /* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
table->used_keys.clear_all();
@@ -184,7 +184,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (!(sortorder= make_unireg_sortorder((ORDER*) order->first,
&length)) ||
(table->sort.found_records = filesort(thd, table, sortorder, length,
- select, HA_POS_ERROR,
+ select, HA_POS_ERROR, 1,
&examined_rows))
== HA_POS_ERROR)
{
@@ -226,6 +226,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (ha_delete_all_rows)
thd->options&= ~static_cast<ulonglong>(OPTION_BIN_LOG);
+ table->mark_columns_needed_for_delete();
+
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
@@ -285,7 +287,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
}
thd->proc_info= "end";
end_read_record(&info);
- free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
@@ -396,7 +397,7 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds)
thd->lex->allow_sum_func= 0;
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, conds, &select_lex->leaf_tables,
+ table_list, &select_lex->leaf_tables,
FALSE) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
setup_ftfuncs(select_lex))
@@ -458,7 +459,7 @@ bool mysql_multi_delete_prepare(THD *thd)
*/
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- lex->query_tables, &lex->select_lex.where,
+ lex->query_tables,
&lex->select_lex.leaf_tables, FALSE))
DBUG_RETURN(TRUE);
@@ -565,6 +566,8 @@ multi_delete::initialize_tables(JOIN *join)
transactional_tables= 1;
else
normal_tables= 1;
+ tbl->prepare_for_position();
+ tbl->mark_columns_needed_for_delete();
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
@@ -604,7 +607,6 @@ multi_delete::~multi_delete()
table_being_deleted= table_being_deleted->next_local)
{
TABLE *table= table_being_deleted->table;
- free_io_cache(table); // Alloced by unique
table->no_keyread=0;
}
diff --git a/sql/sql_do.cc b/sql/sql_do.cc
index 08388dee516..98483ce2de6 100644
--- a/sql/sql_do.cc
+++ b/sql/sql_do.cc
@@ -24,7 +24,7 @@ bool mysql_do(THD *thd, List<Item> &values)
List_iterator<Item> li(values);
Item *value;
DBUG_ENTER("mysql_do");
- if (setup_fields(thd, 0, values, 0, 0, 0))
+ if (setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, 0))
DBUG_RETURN(TRUE);
while ((value = li++))
value->val_int();
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 18b63ba49a3..bf035401bea 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -188,13 +188,13 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen)
/* for now HANDLER can be used only for real TABLES */
tables->required_type= FRMTYPE_TABLE;
error= open_tables(thd, &tables, &counter, 0);
-
HANDLER_TABLES_HACK(thd);
+
if (error)
goto err;
/* There can be only one table in '*tables'. */
- if (! (tables->table->file->table_flags() & HA_CAN_SQL_HANDLER))
+ if (! (tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER))
{
if (! reopen)
my_error(ER_ILLEGAL_HA, MYF(0), tables->alias);
@@ -421,6 +421,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (!lock)
goto err0; // mysql_lock_tables() printed error message already
+ // Always read all columns
+ tables->table->read_set= &tables->table->s->all_set;
+
if (cond)
{
if (table->query_id != thd->query_id)
@@ -514,6 +517,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
Item *item;
for (key_len=0 ; (item=it_ke++) ; key_part++)
{
+ my_bitmap_map *old_map;
// 'item' can be changed by fix_fields() call
if ((!item->fixed &&
item->fix_fields(thd, it_ke.ref())) ||
@@ -524,16 +528,19 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ");
goto err;
}
+ old_map= dbug_tmp_use_all_columns(table, table->write_set);
(void) item->save_in_field(key_part->field, 1);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
key_len+=key_part->store_length;
}
+
if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
goto err;
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno, 1);
key_copy(key, table->record[0], table->key_info + keyno, key_len);
error= table->file->index_read(table->record[0],
- key,key_len,ha_rkey_mode);
+ key,key_len,ha_rkey_mode);
mode=rkey_to_rnext[(int)ha_rkey_mode];
break;
}
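
The HANDLER ... READ path above temporarily widens the write map so key
values can be stored into fields, then restores the statement's map;
dbug_tmp_use_all_columns() does exactly this in debug builds. A sketch of
the save/restore pattern (illustrative types, not the server's):

    // Point the write map at an all-set bitmap around the store, then
    // put the old map back.
    #include <bitset>

    struct TableSketch {
      std::bitset<16> all_set;        // every column writable
      std::bitset<16> def_write_set;  // the statement's real write set
      std::bitset<16> *write_set;
    };

    int main()
    {
      TableSketch t;
      t.all_set.set();                // like TABLE_SHARE::all_set
      t.write_set= &t.def_write_set;

      std::bitset<16> *old_map= t.write_set;
      t.write_set= &t.all_set;        // widen: any column may be stored
      /* ... item->save_in_field(key_part->field, 1) would run here ... */
      t.write_set= old_map;           // restore the statement's map
      return 0;
    }
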
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index ea9bca57cc6..69d21f8b7bb 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -94,6 +94,11 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
0, REPORT_ALL_ERRORS, 1,
TRUE)))
DBUG_RETURN(1);
+ bitmap_set_bit(find_fields->field->table->read_set,
+ find_fields->field->field_index);
+ /* To make life easier when setting values in keys */
+ bitmap_set_bit(find_fields->field->table->write_set,
+ find_fields->field->field_index);
}
DBUG_RETURN(0);
}
@@ -272,7 +277,6 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
int count= 0;
int iindex_topic, iindex_relations;
Field *rtopic_id, *rkey_id;
-
DBUG_ENTER("get_topics_for_keyword");
if ((iindex_topic= find_type((char*) primary_key_name,
@@ -292,8 +296,9 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
rkey_id->store((longlong) key_id, TRUE);
rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
int key_res= relations->file->index_read(relations->record[0],
- (byte *)buff, rkey_id->pack_length(),
- HA_READ_KEY_EXACT);
+ (byte *) buff,
+ rkey_id->pack_length(),
+ HA_READ_KEY_EXACT);
for ( ;
!key_res && key_id == (int16) rkey_id->val_int() ;
@@ -653,13 +658,15 @@ bool mysqld_help(THD *thd, const char *mask)
if (open_and_lock_tables(thd, tables))
goto error;
+
/*
Init tables and fields to be usable from items
tables do not contain VIEWs => we can pass 0 as conds
*/
- setup_tables(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
- tables, 0, &leaves, FALSE);
+ if (setup_tables(thd, &thd->lex->select_lex.context,
+ &thd->lex->select_lex.top_join_list,
+ tables, &leaves, FALSE))
+ goto error;
memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
if (init_fields(thd, tables, used_fields, array_elements(used_fields)))
goto error;
@@ -681,10 +688,12 @@ bool mysqld_help(THD *thd, const char *mask)
int key_id;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
- used_fields[help_keyword_name].field,&error)))
+ used_fields[help_keyword_name].field,
+ &error)))
goto error;
- count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id);
+ count_topics= search_keyword(thd,tables[3].table, used_fields, select,
+ &key_id);
delete select;
count_topics= (count_topics != 1) ? 0 :
get_topics_for_keyword(thd,tables[0].table,tables[2].table,
@@ -698,7 +707,8 @@ bool mysqld_help(THD *thd, const char *mask)
Field *cat_cat_id= used_fields[help_category_parent_category_id].field;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
- used_fields[help_category_name].field,&error)))
+ used_fields[help_category_name].field,
+ &error)))
goto error;
count_categories= search_categories(thd, tables[1].table, used_fields,
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 946c0536897..894a2a21efb 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -108,7 +108,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
No fields are provided so all fields must be provided in the values.
Thus we set all bits in the write set.
*/
- table->file->ha_set_all_bits_in_write_set();
+ bitmap_set_all(table->write_set);
}
else
{ // Part field list
@@ -123,7 +123,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
return -1;
}
- thd->dupp_field=0;
+ thd->dup_field= 0;
select_lex->no_wrap_view_item= TRUE;
/* Save the state of the current name resolution context. */
@@ -135,11 +135,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
*/
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- /*
- Indicate fields in list is to be updated by setting set_query_id
- parameter to 2. This sets the bit in the write_set for each field.
- */
- res= setup_fields(thd, 0, fields, 2, 0, 0);
+ res= setup_fields(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0);
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
@@ -167,16 +163,27 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
table_list->table= table= tbl->table;
}
- if (check_unique && thd->dupp_field)
+ if (check_unique && thd->dup_field)
{
- my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name);
+ my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dup_field->field_name);
return -1;
}
- if (table->timestamp_field && // Don't set timestamp if used
- table->timestamp_field->query_id == thd->query_id)
- clear_timestamp_auto_bits(table->timestamp_field_type,
- TIMESTAMP_AUTO_SET_ON_INSERT);
+ if (table->timestamp_field) // Don't automatically set timestamp if used
+ {
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
+ clear_timestamp_auto_bits(table->timestamp_field_type,
+ TIMESTAMP_AUTO_SET_ON_INSERT);
+ else
+ {
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
+ }
+ }
}
+ if (table->found_next_number_field)
+ table->mark_auto_increment_column();
+ table->mark_columns_needed_for_insert();
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
@@ -217,40 +224,33 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
List<Item> &update_fields)
{
TABLE *table= insert_table_list->table;
- query_id_t timestamp_query_id;
- LINT_INIT(timestamp_query_id);
+ my_bool timestamp_mark;
- /*
- Change the query_id for the timestamp column so that we can
- check if this is modified directly.
- */
if (table->timestamp_field)
{
- timestamp_query_id= table->timestamp_field->query_id;
- table->timestamp_field->query_id= thd->query_id - 1;
+ /*
+ Unmark the timestamp field so that we can check if this is modified
+ by update_fields
+ */
+ timestamp_mark= bitmap_test_and_clear(table->write_set,
+ table->timestamp_field->field_index);
}
- /*
- Check the fields we are going to modify. This will set the query_id
- of all used fields to the threads query_id. It will also set all
- fields into the write set of this table.
- */
- if (setup_fields(thd, 0, update_fields, 2, 0, 0))
+ /* Check the fields we are going to modify */
+ if (setup_fields(thd, 0, update_fields, MARK_COLUMNS_WRITE, 0, 0))
return -1;
if (table->timestamp_field)
{
/* Don't set timestamp column if this is modified. */
- if (table->timestamp_field->query_id == thd->query_id)
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_UPDATE);
- else
- {
- table->timestamp_field->query_id= timestamp_query_id;
- table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
- }
+ if (timestamp_mark)
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
}
-
return 0;
}
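
check_update_fields() now uses a test-and-clear trick: drop the timestamp
column's write bit, let the UPDATE field list re-set it if the user assigns
to the column, and restore the bit otherwise. A standalone sketch, assuming
an illustrative column index:

    // bitmap_test_and_clear() / bitmap_set_bit() mimicked on std::bitset.
    #include <bitset>
    #include <cstdio>

    int main()
    {
      std::bitset<8> write_set;
      const unsigned ts_idx= 5;          // illustrative timestamp column
      write_set.set(ts_idx);

      bool was_marked= write_set.test(ts_idx);  // test-and-clear
      write_set.reset(ts_idx);

      /* setup_fields(..., MARK_COLUMNS_WRITE, ...) runs here; an
         explicit "SET ts = ..." would set the bit again. */
      bool user_assigned= write_set.test(ts_idx);

      if (!user_assigned && was_marked)
        write_set.set(ts_idx);           // keep auto-set behaviour
      std::printf("auto-set timestamp: %s\n", user_assigned ? "no" : "yes");
      return 0;
    }
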
@@ -269,8 +269,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
By default, both logs are enabled (this won't cause problems if the server
runs without --log-update or --log-bin).
*/
- bool log_on= (thd->options & OPTION_BIN_LOG) ||
- (!(thd->security_ctx->master_access & SUPER_ACL));
+ bool log_on= ((thd->options & OPTION_BIN_LOG) ||
+ (!(thd->security_ctx->master_access & SUPER_ACL)));
bool transactional_table, joins_freed= FALSE;
bool changed;
uint value_count;
@@ -380,7 +380,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto abort;
}
- if (setup_fields(thd, 0, *values, 0, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
goto abort;
}
its.rewind ();
@@ -753,7 +753,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
*/
static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
- List<Item> &fields, COND **where,
+ List<Item> &fields,
bool select_insert)
{
bool insert_into_view= (table_list->view != 0);
@@ -761,7 +761,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, where, &thd->lex->select_lex.leaf_tables,
+ table_list, &thd->lex->select_lex.leaf_tables,
select_insert))
DBUG_RETURN(TRUE);
@@ -851,8 +851,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
- if (mysql_prepare_insert_check_table(thd, table_list, fields, where,
- select_insert))
+ if (mysql_prepare_insert_check_table(thd, table_list, fields, select_insert))
DBUG_RETURN(TRUE);
/* Save the state of the current name resolution context. */
@@ -869,7 +868,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
if (values &&
!(res= check_insert_fields(thd, context->table_list, fields, *values,
!insert_into_view) ||
- setup_fields(thd, 0, *values, 0, 0, 0)) &&
+ setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0)) &&
duplic == DUP_UPDATE)
{
select_lex->no_wrap_view_item= TRUE;
@@ -887,7 +886,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
next_name_resolution_table= ctx_state.save_next_local;
}
if (!res)
- res= setup_fields(thd, 0, update_values, 1, 0, 0);
+ res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0);
}
/* Restore the current context. */
@@ -912,7 +911,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
select_lex->first_execution= 0;
}
if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
- table->file->ha_retrieve_all_pk();
+ table->prepare_for_position();
DBUG_RETURN(FALSE);
}
@@ -959,9 +958,12 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
{
int error, trg_error= 0;
char *key=0;
+ MY_BITMAP *save_read_set, *save_write_set;
DBUG_ENTER("write_record");
info->records++;
+ save_read_set= table->read_set;
+ save_write_set= table->write_set;
if (info->handle_duplicates == DUP_REPLACE ||
info->handle_duplicates == DUP_UPDATE)
@@ -977,6 +979,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
error=HA_WRITE_SKIP; /* Database can't find key */
goto err;
}
+ /* Read all columns for the row we are going to replace */
+ table->use_all_columns();
/*
Don't allow REPLACE to replace a row when a auto_increment column
was used. This ensures that we don't get a problem when the
@@ -987,9 +991,9 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
key_nr == table->s->next_number_index &&
table->file->auto_increment_column_changed)
goto err;
- if (table->file->table_flags() & HA_DUPP_POS)
+ if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
- if (table->file->rnd_pos(table->record[1],table->file->dupp_ref))
+ if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
goto err;
}
else
@@ -1050,7 +1054,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
thd->clear_next_insert_id= 0;
thd->next_insert_id= 0;
}
- if ((error=table->file->ha_update_row(table->record[1],table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
goto ok_or_after_trg_err;
@@ -1127,6 +1132,13 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
TRG_ACTION_AFTER, TRUE));
+ /*
+ Restore the column maps if they were replaced while handling a
+ duplicate key conflict.
+ */
+ if (table->read_set != save_read_set ||
+ table->write_set != save_write_set)
+ table->column_bitmaps_set(save_read_set, save_write_set);
}
else if ((error=table->file->ha_write_row(table->record[0])))
{
@@ -1160,6 +1172,7 @@ err:
before_trg_err:
if (key)
my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
+ table->column_bitmaps_set(save_read_set, save_write_set);
DBUG_RETURN(1);
}
@@ -1172,9 +1185,11 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list)
{
int err= 0;
+ MY_BITMAP *write_set= entry->write_set;
+
for (Field **field=entry->field ; *field ; field++)
{
- if ((*field)->query_id != thd->query_id &&
+ if (!bitmap_is_set(write_set, (*field)->field_index) &&
((*field)->flags & NO_DEFAULT_VALUE_FLAG) &&
((*field)->real_type() != FIELD_TYPE_ENUM))
{
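
check_that_all_fields_are_given_values() thus flags a column when it
demands an explicit value (NO_DEFAULT_VALUE_FLAG) but its bit never made it
into the write set. A minimal sketch of that check with made-up field
metadata:

    // A column fails if it requires an explicit value and was never
    // assigned (its write-set bit is clear).
    #include <bitset>
    #include <cstdio>

    struct FieldSketch { const char *name; bool no_default; };

    int main()
    {
      FieldSketch fields[]= {{"id", true}, {"note", false}, {"qty", true}};
      std::bitset<3> write_set;
      write_set.set(0);                  // only "id" was assigned

      for (unsigned i= 0; i < 3; i++)
        if (!write_set.test(i) && fields[i].no_default)
          std::printf("field '%s' doesn't have a default value\n",
                      fields[i].name);
      return 0;
    }
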
@@ -1506,6 +1521,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
Field **field,**org_field, *found_next_number_field;
TABLE *copy;
TABLE_SHARE *share= table->s;
+ byte *bitmap;
/* First request insert thread to get a lock */
status=1;
@@ -1532,14 +1548,16 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
client_thd->proc_info="allocating local table";
copy= (TABLE*) client_thd->alloc(sizeof(*copy)+
(share->fields+1)*sizeof(Field**)+
- share->reclength);
+ share->reclength +
+ share->column_bitmap_size*2);
if (!copy)
goto error;
*copy= *table;
/* We don't need to change the file handler here */
- field=copy->field=(Field**) (copy+1);
- copy->record[0]=(byte*) (field+share->fields+1);
+ field= copy->field= (Field**) (copy+1);
+ bitmap= (byte*) (field+share->fields+1);
+ copy->record[0]= (bitmap+ share->column_bitmap_size*2);
memcpy((char*) copy->record[0],(char*) table->record[0],share->reclength);
/* Make a copy of all fields */
@@ -1568,13 +1586,21 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type();
}
-
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
/* Adjust lock_count. This table object is not part of a lock. */
copy->lock_count= 0;
+ /* Adjust bitmaps */
+ copy->def_read_set.bitmap= (my_bitmap_map*) bitmap;
+ copy->def_write_set.bitmap= ((my_bitmap_map*)
+ (bitmap + share->column_bitmap_size));
+ copy->tmp_set.bitmap= 0; // To catch errors
+ bzero((char*) bitmap, share->column_bitmap_size*2);
+ copy->read_set= &copy->def_read_set;
+ copy->write_set= &copy->def_write_set;
+
return copy;
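
get_local_table() packs the table copy into one allocation: the TABLE
object, the field-pointer array, two column bitmaps, and the record buffer,
carved out by pointer arithmetic. A sketch of that layout arithmetic with
illustrative sizes:

    // Single-allocation layout:
    // [field pointers][read bitmap][write bitmap][record buffer]
    #include <cstdlib>
    #include <cstdio>

    int main()
    {
      size_t n_fields= 4, bitmap_size= 8, reclength= 32;
      size_t total= (n_fields + 1) * sizeof(void*) +
                    2 * bitmap_size + reclength;

      char *block= (char*) std::calloc(1, total);
      void **field= (void**) block;                  // field pointer array
      char *bitmaps= (char*) (field + n_fields + 1); // read + write maps
      char *record= bitmaps + 2 * bitmap_size;       // row buffer last
      std::printf("record offset: %zu of %zu bytes\n",
                  (size_t)(record - block), total);
      std::free(block);
      return 0;
    }
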
/* Got fatal error */
@@ -1742,7 +1768,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
thd->fatal_error(); // Abort waiting inserts
goto err;
}
- if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
+ if (!(di->table->file->ha_table_flags() & HA_CAN_INSERT_DELAYED))
{
thd->fatal_error();
my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.table_name);
@@ -1952,6 +1978,7 @@ bool delayed_insert::handle_inserts(void)
pthread_mutex_unlock(&mutex);
table->next_number_field=table->found_next_number_field;
+ table->use_all_columns();
thd.proc_info="upgrading lock";
if (thr_upgrade_write_delay_lock(*thd.lock->locks))
@@ -2058,7 +2085,6 @@ bool delayed_insert::handle_inserts(void)
}
thd.proc_info=0;
- table->next_number_field=0;
pthread_mutex_unlock(&mutex);
/* After releasing the mutex, to prevent deadlocks. */
@@ -2181,7 +2207,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
lex->current_select= &lex->select_lex;
res= check_insert_fields(thd, table_list, *fields, values,
!insert_into_view) ||
- setup_fields(thd, 0, values, 0, 0, 0);
+ setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0);
if (info.handle_duplicates == DUP_UPDATE)
{
@@ -2211,7 +2237,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
context->first_name_resolution_table->
next_name_resolution_table= ctx_state.save_next_local;
}
- res= res || setup_fields(thd, 0, *info.update_values, 1, 0, 0);
+ res= res || setup_fields(thd, 0, *info.update_values, MARK_COLUMNS_READ,
+ 0, 0);
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
@@ -2264,16 +2291,6 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));
-
- /*
- For non-transactional non-temporary tables, we set the
- OPTION_STATUS_NO_TRANS_UPDATE flag here. The send_eof() function
- is used by both the select_insert and the select_create classes,
- so setting it there would clash.
- */
- if (!(table->file->has_transactions() || table->s->tmp_table))
- thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
-
DBUG_RETURN(res);
}
@@ -2391,7 +2408,9 @@ void select_insert::send_error(uint errcode,const char *err)
{
DBUG_ENTER("select_insert::send_error");
- my_message(errcode, err, MYF(0));
+ /* Avoid an extra 'unknown error' message if we already reported an error */
+ if (errcode != ER_UNKNOWN_ERROR && !thd->net.report_error)
+ my_message(errcode, err, MYF(0));
if (!table)
{
@@ -2419,11 +2438,8 @@ void select_insert::send_error(uint errcode,const char *err)
INSERT-SELECT.
When replicating a CREATE-SELECT statement, we shall not write the
- events to the binary log. To prevent the ha_rollback_stmt() below
- from writing to the binary log, we have to pretend that the table
- is transactional, even if it actually is not. Therefore, the
- OPTION_STATUS_NO_TRANS_UPDATE is cleared in
- select_create::prepare() and will remain cleared here.
+ events to the binary log and should thus not set
+ OPTION_STATUS_NO_TRANS_UPDATE.
When replicating INSERT-SELECT, we shall not write the events to
the binary log for transactional table, but shall write all events
@@ -2431,22 +2447,22 @@ void select_insert::send_error(uint errcode,const char *err)
this case, the OPTION_STATUS_NO_TRANS_UPDATE is set if there is a
write to a non-transactional table, otherwise it is cleared.
*/
- if ((info.copied || info.deleted || info.updated) &&
- !table->file->has_transactions())
+ if (info.copied || info.deleted || info.updated)
{
- if (last_insert_id)
- thd->insert_id(last_insert_id); // For binary log
- if (mysql_bin_log.is_open())
+ if (!table->file->has_transactions())
{
- thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
- table->file->has_transactions(), FALSE);
+ if (last_insert_id)
+ thd->insert_id(last_insert_id); // For binary log
+ if (mysql_bin_log.is_open())
+ {
+ thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
+ table->file->has_transactions(), FALSE);
+ }
+ if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table &&
+ !can_rollback_data())
+ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ query_cache_invalidate3(thd, table, 1);
}
- if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table)
- thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
- }
- if (info.copied || info.deleted || info.updated)
- {
- query_cache_invalidate3(thd, table, 1);
}
ha_rollback_stmt(thd);
DBUG_VOID_RETURN;
@@ -2461,22 +2477,25 @@ bool select_insert::send_eof()
error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- /*
- We must invalidate the table in the query cache before binlog writing
- and ha_autocommit_or_rollback.
-
- If nothing was inserted in the table, there is no need to emit a
- ROLLBACK statement to the binary log, so in that case we clear
- OPTION_STATUS_NO_TRANS_UPDATE.
-
- Observe that select_insert::send_eof() is used by both
- select_insert and select_create and that they set the flag in
- different manners. See Note 1 below for more info.
- */
if (info.copied || info.deleted || info.updated)
+ {
+ /*
+ We must invalidate the table in the query cache before binlog writing
+ and ha_autocommit_or_rollback.
+ */
query_cache_invalidate3(thd, table, 1);
- else
- thd->options&= ~OPTION_STATUS_NO_TRANS_UPDATE;
+ /*
+ Mark that we have done permanent changes if all of the following hold:
+ - Table doesn't support transactions
+ - It's a normal (not temporary) table. (Changes to temporary tables
+ are not logged in RBR)
+ - We are using statement-based replication
+ */
+ if (!table->file->has_transactions() &&
+ (!table->s->tmp_table ||
+ !thd->current_stmt_binlog_row_based))
+ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ }
if (last_insert_id)
thd->insert_id(last_insert_id); // For binary log
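
The send_eof() change narrows when OPTION_STATUS_NO_TRANS_UPDATE is set:
only a non-transactional target that will actually need the flag gets it,
and temporary tables are skipped under row-based logging. A sketch of the
predicate:

    // Mark a permanent (non-rollbackable) change only for a
    // non-transactional target that will be logged.
    #include <cstdio>

    static bool mark_permanent_change(bool has_transactions, bool tmp_table,
                                      bool row_based)
    {
      return !has_transactions && (!tmp_table || !row_based);
    }

    int main()
    {
      // MyISAM temp table, row-based binlog: nothing to warn about later.
      std::printf("%d\n", mark_permanent_change(false, true, true));   // 0
      // MyISAM base table: rollback cannot undo this.
      std::printf("%d\n", mark_permanent_change(false, false, true));  // 1
      return 0;
    }
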
@@ -2518,31 +2537,30 @@ bool select_insert::send_eof()
CREATE TABLE (SELECT) ...
***************************************************************************/
-int
-select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
+class MY_HOOKS : public TABLEOP_HOOKS
{
- DBUG_ENTER("select_create::prepare");
-
- class MY_HOOKS : public TABLEOP_HOOKS {
- public:
- MY_HOOKS(select_create *x) : ptr(x) { }
- virtual void do_prelock(TABLE **tables, uint count)
+public:
+ MY_HOOKS(select_create *x) : ptr(x) { }
+ virtual void do_prelock(TABLE **tables, uint count)
{
if (ptr->get_thd()->current_stmt_binlog_row_based)
ptr->binlog_show_create_table(tables, count);
}
- private:
- select_create *ptr;
- };
+private:
+ select_create *ptr;
+};
+
+int select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
+{
MY_HOOKS hooks(this);
+ DBUG_ENTER("select_create::prepare");
unit= u;
- table= create_table_from_items(thd, create_info, create_table,
- extra_fields, keys, &values, &lock,
- &hooks);
- if (!table)
+ if (!(table= create_table_from_items(thd, create_info, create_table,
+ extra_fields, keys, &values, &lock,
+ &hooks)))
DBUG_RETURN(-1); // abort() deletes table
if (table->s->fields < values.elements)
@@ -2551,16 +2569,15 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_RETURN(-1);
}
- /* First field to copy */
+ /* First field to copy */
field= table->field+table->s->fields - values.elements;
/* Mark all fields that are given values */
for (Field **f= field ; *f ; f++)
- (*f)->query_id= thd->query_id;
+ bitmap_set_bit(table->write_set, (*f)->field_index);
/* Don't set timestamp if used */
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
table->next_number_field=table->found_next_number_field;
restore_record(table,s->default_values); // Get empty record
@@ -2579,8 +2596,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
}
-void
-select_create::binlog_show_create_table(TABLE **tables, uint count)
+void select_create::binlog_show_create_table(TABLE **tables, uint count)
{
/*
Note 1: In RBR mode, we generate a CREATE TABLE statement for the
@@ -2598,25 +2614,22 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
since there potentially are sub-selects or accesses to information
schema that will do a close_thread_tables(), destroying the
statement transaction cache.
-
- To ensure that the event kaboodle is not written to the binary log
- on rollback, we clear the OPTION_STATUS_NO_TRANS_UPDATE bit of
- thd->options.
- */
+ */
DBUG_ASSERT(thd->current_stmt_binlog_row_based);
DBUG_ASSERT(tables && *tables && count > 0);
- thd->options&= ~OPTION_STATUS_NO_TRANS_UPDATE;
char buf[2048];
String query(buf, sizeof(buf), system_charset_info);
- query.length(0); // Have to zero it since constructor doesn't
-
+ int result;
TABLE_LIST table_list;
+
memset(&table_list, 0, sizeof(table_list));
table_list.table = *tables;
+ query.length(0); // Have to zero it since constructor doesn't
- int result= store_create_info(thd, &table_list, &query, create_info);
+ result= store_create_info(thd, &table_list, &query, create_info);
DBUG_ASSERT(result == 0); /* store_create_info() always returns 0 */
+
thd->binlog_query(THD::STMT_QUERY_TYPE,
query.ptr(), query.length(),
/* is_trans */ TRUE,
@@ -2653,17 +2666,9 @@ bool select_create::send_eof()
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, lock);
- /*
- TODO:
- Check if we can remove the following two rows.
- We should be able to just keep the table in the table cache.
- */
if (!table->s->tmp_table)
{
- ulong version= table->s->version;
- hash_delete(&open_cache,(byte*) table);
- /* Tell threads waiting for refresh that something has happened */
- if (version != refresh_version)
+ if (close_thread_table(thd, &table))
VOID(pthread_cond_broadcast(&COND_refresh));
}
lock=0;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index bf8a6b8cfbe..5c5ecfbbcd0 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -121,7 +121,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
int error;
String *field_term=ex->field_term,*escaped=ex->escaped;
String *enclosed=ex->enclosed;
- Item *unused_conds= 0;
bool is_fifo=0;
#ifndef EMBEDDED_LIBRARY
LOAD_FILE_INFO lf_info;
@@ -155,7 +154,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, &unused_conds,
+ table_list,
&thd->lex->select_lex.leaf_tables, FALSE))
DBUG_RETURN(-1);
if (!table_list->table || // do not support join view
@@ -187,51 +186,48 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table= table_list->table;
transactional_table= table->file->has_transactions();
+ if (table->found_next_number_field)
+ table->mark_auto_increment_column();
+
if (!fields_vars.elements)
{
Field **field;
for (field=table->field; *field ; field++)
fields_vars.push_back(new Item_field(*field));
- /*
- Since all fields are set we set all bits in the write set
- */
- table->file->ha_set_all_bits_in_write_set();
+ bitmap_set_all(table->write_set);
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/*
Let us also prepare SET clause, although it is probably empty
in this case.
*/
- if (setup_fields(thd, 0, set_fields, 1, 0, 0) ||
- setup_fields(thd, 0, set_values, 1, 0, 0))
+ if (setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) ||
+ setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(TRUE);
}
else
{ // Part field list
/* TODO: use this conds for 'WITH CHECK OPTIONS' */
- /*
- Indicate that both variables in field list and fields in update_list
- is to be included in write set of table. We do however set all bits
- in write set anyways since it is not allowed to specify NULLs in
- LOAD DATA
- */
- table->file->ha_set_all_bits_in_write_set();
- if (setup_fields(thd, 0, fields_vars, 2, 0, 0) ||
- setup_fields(thd, 0, set_fields, 2, 0, 0) ||
+ if (setup_fields(thd, 0, fields_vars, MARK_COLUMNS_WRITE, 0, 0) ||
+ setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) ||
check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(TRUE);
/*
Check whenever TIMESTAMP field with auto-set feature specified
explicitly.
*/
- if (table->timestamp_field &&
- table->timestamp_field->query_id == thd->query_id)
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
- /*
- Fix the expressions in SET clause. This should be done after
- check_that_all_fields_are_given_values() and setting use_timestamp
- since it may update query_id for some fields.
- */
- if (setup_fields(thd, 0, set_values, 1, 0, 0))
+ if (table->timestamp_field)
+ {
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+ else
+ {
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
+ }
+ }
+ /* Fix the expressions in SET clause */
+ if (setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc
index b457ff5a6d6..3d06030536f 100644
--- a/sql/sql_olap.cc
+++ b/sql/sql_olap.cc
@@ -155,9 +155,11 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex)
if (setup_tables(lex->thd, &select_lex->context, &select_lex->top_join_list,
(TABLE_LIST *)select_lex->table_list.first
- &select_lex->where, &select_lex->leaf_tables, FALSE) ||
- setup_fields(lex->thd, 0, select_lex->item_list, 1, &all_fields,1) ||
- setup_fields(lex->thd, 0, item_list_copy, 1, &all_fields, 1))
+ &select_lex->leaf_tables, FALSE) ||
+ setup_fields(lex->thd, 0, select_lex->item_list, MARK_COLUMNS_READ,
+ &all_fields,1) ||
+ setup_fields(lex->thd, 0, item_list_copy, MARK_COLUMNS_READ,
+ &all_fields, 1))
return -1;
if (select_lex->olap == CUBE_TYPE)
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 8ee78578631..934faebf9a7 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -162,8 +162,9 @@ bool end_active_trans(THD *thd)
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
if (ha_commit(thd))
error=1;
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
}
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
DBUG_RETURN(error);
}
@@ -186,8 +187,7 @@ bool begin_trans(THD *thd)
else
{
LEX *lex= thd->lex;
- thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) |
- OPTION_BEGIN);
+ thd->options|= OPTION_BEGIN;
thd->server_status|= SERVER_STATUS_IN_TRANS;
if (lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT)
error= ha_start_consistent_snapshot(thd);
@@ -398,7 +398,7 @@ int check_user(THD *thd, enum enum_server_command command,
NO_ACCESS)) // authentication is OK
{
DBUG_PRINT("info",
- ("Capabilities: %d packet_length: %ld Host: '%s' "
+ ("Capabilities: %lx packet_length: %ld Host: '%s' "
"Login user: '%s' Priv_user: '%s' Using password: %s "
"Access: %u db: '%s'",
thd->client_capabilities,
@@ -1439,7 +1439,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion)
*/
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
res= ha_commit(thd);
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
break;
case COMMIT_RELEASE:
do_release= 1; /* fall through */
@@ -1456,7 +1457,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion)
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
if (ha_rollback(thd))
res= -1;
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
if (!res && (completion == ROLLBACK_AND_CHAIN))
res= begin_trans(thd);
break;
@@ -2735,8 +2737,7 @@ mysql_execute_command(THD *thd)
goto error;
if (end_active_trans(thd))
goto error;
- else
- res = load_master_data(thd);
+ res = load_master_data(thd);
break;
#endif /* HAVE_REPLICATION */
case SQLCOM_SHOW_ENGINE_STATUS:
@@ -2800,11 +2801,6 @@ mysql_execute_command(THD *thd)
break;
}
}
- else
- {
- /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
- }
DBUG_ASSERT(first_table == all_tables && first_table != 0);
bool link_to_local;
// Skip first table, which is the table we are creating
@@ -2918,6 +2914,9 @@ mysql_execute_command(THD *thd)
}
else
{
+ /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
+ if (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)
+ thd->options|= OPTION_KEEP_LOG;
/* regular create */
if (lex->like_name)
res= mysql_create_like_table(thd, create_table, &lex->create_info,
@@ -3497,7 +3496,7 @@ end_with_restore_list:
lex->drop_if_exists= 1;
/* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ thd->options|= OPTION_KEEP_LOG;
}
res= mysql_rm_table(thd, first_table, lex->drop_if_exists,
lex->drop_temporary);
@@ -4221,7 +4220,8 @@ end_with_restore_list:
res= TRUE; // cannot happen
else
{
- if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) &&
+ if ((thd->options &
+ (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)) &&
!thd->slave_thread)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
@@ -4952,7 +4952,8 @@ end_with_restore_list:
thd->transaction.xid_state.xa_state=XA_ACTIVE;
thd->transaction.xid_state.xid.set(thd->lex->xid);
xid_cache_insert(&thd->transaction.xid_state);
- thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) |
+ thd->options= ((thd->options & ~(OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG)) |
OPTION_BEGIN);
thd->server_status|= SERVER_STATUS_IN_TRANS;
send_ok(thd);
@@ -5046,7 +5047,8 @@ end_with_restore_list:
xa_state_names[thd->transaction.xid_state.xa_state]);
break;
}
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
xid_cache_delete(&thd->transaction.xid_state);
thd->transaction.xid_state.xa_state=XA_NOTR;
@@ -5076,7 +5078,8 @@ end_with_restore_list:
my_error(ER_XAER_RMERR, MYF(0));
else
send_ok(thd);
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
xid_cache_delete(&thd->transaction.xid_state);
thd->transaction.xid_state.xa_state=XA_NOTR;
@@ -5667,6 +5670,14 @@ void mysql_reset_thd_for_next_command(THD *thd)
thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS |
SERVER_QUERY_NO_INDEX_USED |
SERVER_QUERY_NO_GOOD_INDEX_USED);
+ /*
+ If in autocommit mode and not in a transaction, reset
+ OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG so that
+ ha_rollback_trans() does not warn that some tables could not be
+ rolled back.
+ */
+ if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+ thd->options&= ~(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG);
+
DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx);
thd->tmp_table_used= 0;
if (!thd->in_sub_stmt)
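
Throughout sql_parse.cc, OPTION_KEEP_LOG now carries the "keep binlog
events on rollback" meaning (CREATE/DROP TEMPORARY TABLE) separately from
OPTION_STATUS_NO_TRANS_UPDATE, and both are cleared between statements when
no transaction is open. A sketch with illustrative flag values:

    // Clearing both flags in autocommit mode, outside any transaction.
    #include <cstdio>

    enum {
      OPT_BEGIN          = 1 << 0,
      OPT_NOT_AUTOCOMMIT = 1 << 1,
      OPT_NO_TRANS_UPDATE= 1 << 2,
      OPT_KEEP_LOG       = 1 << 3
    };

    static void reset_for_next_command(unsigned &options)
    {
      /* No transaction open: nothing these flags refer to is pending */
      if (!(options & (OPT_NOT_AUTOCOMMIT | OPT_BEGIN)))
        options&= ~(OPT_NO_TRANS_UPDATE | OPT_KEEP_LOG);
    }

    int main()
    {
      unsigned options= OPT_KEEP_LOG;  // e.g. DROP TEMPORARY TABLE just ran
      reset_for_next_command(options); // autocommit: flag is cleared
      std::printf("keep_log still set: %u\n", options & OPT_KEEP_LOG);
      return 0;
    }
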
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 71b8e9b1d95..40e4cd115a1 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -849,7 +849,7 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
context->table_list= tables;
context->first_name_resolution_table= tables;
context->last_name_resolution_table= NULL;
- func_expr->walk(&Item::change_context_processor, (byte*) context);
+ func_expr->walk(&Item::change_context_processor, 0, (byte*) context);
save_where= thd->where;
thd->where= "partition function";
error= func_expr->fix_fields(thd, (Item**)0);
@@ -1335,7 +1335,7 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
char db_name_string[FN_REFLEN];
char* db_name;
partition_info *part_info= table->part_info;
- ulong save_set_query_id= thd->set_query_id;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
Item *thd_free_list= thd->free_list;
DBUG_ENTER("fix_partition_func");
@@ -1343,8 +1343,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
{
DBUG_RETURN(FALSE);
}
- thd->set_query_id= 0;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
/*
Set-up the TABLE_LIST object to be a list with a single table
Set the object to zero to create NULL pointers and set alias
@@ -1484,8 +1484,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
result= FALSE;
end:
thd->free_list= thd_free_list;
- thd->set_query_id= save_set_query_id;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(result);
}
@@ -1990,10 +1990,7 @@ bool partition_key_modified(TABLE *table, List<Item> &fields)
in function
*/
-static
-inline
-longlong
-part_val_int(Item *item_expr)
+static inline longlong part_val_int(Item *item_expr)
{
longlong value= item_expr->val_int();
if (item_expr->null_value)
@@ -2406,10 +2403,12 @@ int get_partition_id_range(partition_info *part_info,
loc_part_id++;
*part_id= (uint32)loc_part_id;
*func_value= part_func_value;
- if (loc_part_id == max_partition)
- if (range_array[loc_part_id] != LONGLONG_MAX)
- if (part_func_value >= range_array[loc_part_id])
- DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ if (loc_part_id == max_partition &&
+ range_array[loc_part_id] != LONGLONG_MAX &&
+ part_func_value >= range_array[loc_part_id])
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ DBUG_PRINT("exit",("partition: %d", *part_id));
DBUG_RETURN(0);
}
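The flattened condition above is the out-of-range guard for RANGE partitioning: range_array[] holds each partition's upper bound in ascending order, and a value at or beyond the last finite bound belongs to no partition. A simplified, self-contained model of that lookup (a linear scan standing in for the server's search; an illustration, not the real function):

#include <cstdint>
#include <vector>

static const int64_t UNBOUNDED= INT64_MAX;   /* stands in for LONGLONG_MAX */

/* Returns the partition index, or -1 for "no partition found".
   range_array must be non-empty and sorted ascending. */
static int range_partition_id(const std::vector<int64_t> &range_array,
                              int64_t value)
{
  size_t max_partition= range_array.size() - 1;
  size_t loc= 0;
  while (loc < max_partition && value >= range_array[loc])
    loc++;
  /* same guard as the hunk above: last partition, finite bound, value too big */
  if (loc == max_partition &&
      range_array[loc] != UNBOUNDED &&
      value >= range_array[loc])
    return -1;
  return (int) loc;
}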
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 01faae22c57..b5050204761 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -758,6 +758,7 @@ void plugin_load(void)
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL, 1, 0);
+ table->use_all_columns();
while (!(error= read_record_info.read_record(&read_record_info)))
{
DBUG_PRINT("info", ("init plugin record"));
@@ -897,8 +898,8 @@ my_bool mysql_uninstall_plugin(THD *thd, LEX_STRING *name)
else
plugin->state= PLUGIN_IS_DELETED;
+ table->use_all_columns();
table->field[0]->store(name->str, name->length, system_charset_info);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (! table->file->index_read_idx(table->record[0], 0,
(byte *)table->field[0]->ptr,
table->key_info[0].key_length,
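Both sql_plugin.cc hunks swap the old HA_EXTRA_RETRIEVE_ALL_COLS handler hint for table->use_all_columns(). A hedged mental model of that call (the member names come from this patch; the one-line body is an assumption, not the real implementation):

struct my_bitmap;                    /* stand-in for MY_BITMAP */

struct table_sketch
{
  my_bitmap *read_set;               /* active column bitmaps           */
  my_bitmap *write_set;
  my_bitmap *all_set;                /* share-wide "every bit set" map  */

  void use_all_columns()
  {
    read_set= write_set= all_set;    /* whole row readable and writable */
  }
};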
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 6d0a0f4799c..f6f4e5baa53 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -621,6 +621,7 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
param->value.cs_info.character_set_of_placeholder= &my_charset_bin;
param->value.cs_info.character_set_client=
thd->variables.character_set_client;
+ DBUG_ASSERT(thd->variables.character_set_client);
param->value.cs_info.final_character_set_of_str_value= &my_charset_bin;
param->item_type= Item::STRING_ITEM;
param->item_result_type= STRING_RESULT;
@@ -1066,7 +1067,7 @@ static bool mysql_test_insert(Prepared_statement *stmt,
its.rewind();
if (table_list->lock_type == TL_WRITE_DELAYED &&
- !(table_list->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
+ !(table_list->table->file->ha_table_flags() & HA_CAN_INSERT_DELAYED))
{
my_error(ER_ILLEGAL_HA, MYF(0), (table_list->view ?
table_list->view_name.str :
@@ -1081,7 +1082,7 @@ static bool mysql_test_insert(Prepared_statement *stmt,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto error;
}
- if (setup_fields(thd, 0, *values, 0, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0))
goto error;
}
}
@@ -1168,7 +1169,7 @@ static int mysql_test_update(Prepared_statement *stmt,
table_list->register_want_access(want_privilege);
#endif
thd->lex->select_lex.no_wrap_view_item= TRUE;
- res= setup_fields(thd, 0, select->item_list, 1, 0, 0);
+ res= setup_fields(thd, 0, select->item_list, MARK_COLUMNS_READ, 0, 0);
thd->lex->select_lex.no_wrap_view_item= FALSE;
if (res)
goto error;
@@ -1179,7 +1180,7 @@ static int mysql_test_update(Prepared_statement *stmt,
(SELECT_ACL & ~table_list->table->grant.privilege);
table_list->register_want_access(SELECT_ACL);
#endif
- if (setup_fields(thd, 0, stmt->lex->value_list, 0, 0, 0))
+ if (setup_fields(thd, 0, stmt->lex->value_list, MARK_COLUMNS_NONE, 0, 0))
goto error;
/* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(0);
@@ -1333,7 +1334,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
if (open_and_lock_tables(thd, tables))
DBUG_RETURN(TRUE);
- DBUG_RETURN(setup_fields(thd, 0, *values, 0, 0, 0));
+ DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0));
}
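Throughout sql_prepare.cc the numeric set_query_id argument (0/1/2) becomes a named column-marking mode. A sketch of the enum these calls rely on (enumerator and type names are taken from the patch; the declaration itself is assumed):

enum enum_mark_columns
{
  MARK_COLUMNS_NONE,   /* resolve fields only, touch no bitmaps        */
  MARK_COLUMNS_READ,   /* also set the field's bit in table->read_set  */
  MARK_COLUMNS_WRITE   /* also set the field's bit in table->write_set */
};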
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 918c9f507e2..baea34f2d0b 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1295,12 +1295,12 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1,
bool mysql_show_binlog_events(THD* thd)
{
Protocol *protocol= thd->protocol;
- DBUG_ENTER("mysql_show_binlog_events");
List<Item> field_list;
const char *errmsg = 0;
bool ret = TRUE;
IO_CACHE log;
File file = -1;
+ DBUG_ENTER("mysql_show_binlog_events");
Log_event::init_show_field_list(&field_list);
if (protocol->send_fields(&field_list,
@@ -1354,12 +1354,12 @@ bool mysql_show_binlog_events(THD* thd)
pthread_mutex_lock(log_lock);
/*
- open_binlog() sought to position 4.
- Read the first event in case it's a Format_description_log_event, to
- know the format. If there's no such event, we are 3.23 or 4.x. This
- code, like before, can't read 3.23 binlogs.
- This code will fail on a mixed relay log (one which has Format_desc then
- Rotate then Format_desc).
+ open_binlog() sought to position 4.
+ Read the first event in case it's a Format_description_log_event, to
+ know the format. If there's no such event, we are 3.23 or 4.x. This
+ code, like before, can't read 3.23 binlogs.
+ This code will fail on a mixed relay log (one which has Format_desc then
+ Rotate then Format_desc).
*/
ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event);
@@ -1383,7 +1383,8 @@ bool mysql_show_binlog_events(THD* thd)
}
for (event_count = 0;
- (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event)); )
+ (ev = Log_event::read_log_event(&log,(pthread_mutex_t*) 0,
+ description_event)); )
{
if (event_count >= limit_start &&
ev->net_send(protocol, linfo.log_file_name, pos))
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5f8c4dd2e1a..40e13762051 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -338,11 +338,10 @@ JOIN::prepare(Item ***rref_pointer_array,
if ((!(select_options & OPTION_SETUP_TABLES_DONE) &&
setup_tables(thd, &select_lex->context, join_list,
- tables_list, &conds, &select_lex->leaf_tables,
- FALSE)) ||
+ tables_list, &select_lex->leaf_tables, FALSE)) ||
setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) ||
select_lex->setup_ref_array(thd, og_num) ||
- setup_fields(thd, (*rref_pointer_array), fields_list, 1,
+ setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ,
&all_fields, 1) ||
setup_without_group(thd, (*rref_pointer_array), tables_list,
select_lex->leaf_tables, fields_list,
@@ -458,13 +457,6 @@ JOIN::prepare(Item ***rref_pointer_array,
goto err; /* purecov: inspected */
}
}
-#ifdef NOT_NEEDED
- else if (!group_list && procedure->flags & PROC_GROUP)
- {
- my_message(ER_NO_GROUP_FOR_PROC, MYF(0));
- goto err;
- }
-#endif
if (order && (procedure->flags & PROC_NO_SORT))
{ /* purecov: inspected */
my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC),
@@ -1004,11 +996,8 @@ JOIN::optimize()
*/
if (need_tmp || select_distinct || group_list || order)
{
- for (uint i_h = const_tables; i_h < tables; i_h++)
- {
- TABLE* table_h = join_tab[i_h].table;
- table_h->file->ha_retrieve_all_pk();
- }
+ for (uint i = const_tables; i < tables; i++)
+ join_tab[i].table->prepare_for_position();
}
DBUG_EXECUTE("info",TEST_join(this););
@@ -1075,7 +1064,7 @@ JOIN::optimize()
tmp_table_param.hidden_field_count= (all_fields.elements -
fields_list.elements);
- if (!(exec_tmp_table1 =
+ if (!(exec_tmp_table1=
create_tmp_table(thd, &tmp_table_param, all_fields,
((!simple_group && !procedure &&
!(test_flags & TEST_NO_KEY_GROUP)) ?
@@ -1791,9 +1780,7 @@ JOIN::destroy()
{
JOIN_TAB *tab, *end;
for (tab= join_tab, end= tab+tables ; tab != end ; tab++)
- {
tab->cleanup();
- }
}
tmp_join->tmp_join= 0;
tmp_table_param.copy_field=0;
@@ -2048,16 +2035,16 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
s->dependent= tables->dep_tables;
s->key_dependent= 0;
if (tables->schema_table)
- table->file->records= 2;
+ table->file->stats.records= 2;
s->on_expr_ref= &tables->on_expr;
if (*s->on_expr_ref)
{
/* s is the only inner table of an outer join */
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if ((!table->file->records || table->no_partitions_used) && !embedding)
+ if ((!table->file->stats.records || table->no_partitions_used) && !embedding)
#else
- if (!table->file->records && !embedding)
+ if (!table->file->stats.records && !embedding)
#endif
{ // Empty table
s->dependent= 0; // Ignore LEFT JOIN depend.
@@ -2090,10 +2077,10 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
#else
const bool no_partitions_used= FALSE;
#endif
- if ((table->s->system || table->file->records <= 1 ||
+ if ((table->s->system || table->file->stats.records <= 1 ||
no_partitions_used) &&
!s->dependent &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->fulltext_searched)
{
set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2182,8 +2169,8 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
// All dep. must be constants
if (s->dependent & ~(found_const_table_map))
continue;
- if (table->file->records <= 1L &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ if (table->file->stats.records <= 1L &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->pos_in_table_list->embedding)
{ // system table
int tmp= 0;
@@ -2271,7 +2258,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
continue;
}
/* Approximate found rows and time to read them */
- s->found_records=s->records=s->table->file->records;
+ s->found_records=s->records=s->table->file->stats.records;
s->read_time=(ha_rows) s->table->file->scan_time();
/*
@@ -3198,7 +3185,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
if (map == 1) // Only one table
{
TABLE *tmp_table=join->all_tables[tablenr];
- keyuse->ref_table_rows= max(tmp_table->file->records, 100);
+ keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100);
}
}
/*
@@ -3243,7 +3230,7 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
if (join->group_list)
{ /* Collect all query fields referenced in the GROUP clause. */
for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
- (*cur_group->item)->walk(&Item::collect_item_field_processor,
+ (*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
(byte*) &indexed_fields);
}
else if (join->select_distinct)
@@ -3252,7 +3239,8 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
List_iterator<Item> select_items_it(select_items);
Item *item;
while ((item= select_items_it++))
- item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields);
+ item->walk(&Item::collect_item_field_processor, 0,
+ (byte*) &indexed_fields);
}
else
return;
@@ -3469,7 +3457,7 @@ best_access_path(JOIN *join,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -3559,7 +3547,7 @@ best_access_path(JOIN *join,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -3613,7 +3601,7 @@ best_access_path(JOIN *join,
if ((records >= s->found_records || best > s->read_time) && // (1)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
- !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
+ !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
! s->table->used_keys.is_clear_all() && best_key) && // (3)
!(s->table->force_index && best_key && !s->quick)) // (4)
{ // Check full join
@@ -4496,7 +4484,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp=record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -4582,7 +4570,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp=record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -4619,7 +4607,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if ((records >= s->found_records || best > s->read_time) &&
!(s->quick && best_key && s->quick->index == best_key->key &&
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&
- !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) &&
+ !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) &&
! s->table->used_keys.is_clear_all() && best_key) &&
!(s->table->force_index && best_key && !s->quick))
{ // Check full join
@@ -4746,12 +4734,13 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
{
uint null_fields,blobs,fields,rec_length;
- null_fields=blobs=fields=rec_length=0;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= join_tab->table->read_set;
+
+ null_fields= blobs= fields= rec_length=0;
for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
{
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
uint flags=field->flags;
fields++;
@@ -4768,7 +4757,7 @@ static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
rec_length+=sizeof(my_bool);
if (blobs)
{
- uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+ uint blob_length=(uint) (join_tab->table->file->stats.mean_rec_length-
(join_tab->table->s->reclength- rec_length));
rec_length+=(uint) max(4,blob_length);
}
@@ -5059,8 +5048,12 @@ bool
store_val_in_field(Field *field,Item *item)
{
bool error;
- THD *thd= field->table->in_use;
+ TABLE *table= field->table;
+ THD *thd= table->in_use;
ha_rows cuted_fields=thd->cuted_fields;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
+
/*
we should restore old value of count_cuted_fields because
store_val_in_field can be called from mysql_insert
@@ -5070,6 +5063,7 @@ store_val_in_field(Field *field,Item *item)
thd->count_cuted_fields= CHECK_FIELD_WARN;
error= item->save_in_field(field, 1);
thd->count_cuted_fields= old_count_cuted_fields;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return error || cuted_fields != thd->cuted_fields;
}
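The dbug_tmp_use_all_columns() / dbug_tmp_restore_column_map() pair seen here recurs throughout the patch wherever code legitimately stores into columns outside the statement's write_set, which debug builds would otherwise trap. A minimal self-contained model of the pattern, with a plain integer mask standing in for MY_BITMAP:

#include <cstdint>

typedef uint64_t col_map;

static col_map tmp_use_all(col_map *set)   /* like dbug_tmp_use_all_columns */
{
  col_map old= *set;
  *set= ~(col_map) 0;                      /* temporarily permit every column */
  return old;
}

static void tmp_restore(col_map *set, col_map old) /* like dbug_tmp_restore_column_map */
{
  *set= old;
}

static void guarded_store(col_map *write_set)
{
  col_map old= tmp_use_all(write_set);
  /* ... save_in_field()-style store happens here; the bitmap check passes ... */
  tmp_restore(write_set, old);
}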
@@ -5407,7 +5401,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
DBUG_RETURN(1);
tmp->quick_fix_field();
cond_tab->select_cond= !cond_tab->select_cond ? tmp :
- new Item_cond_and(cond_tab->select_cond,tmp);
+ new Item_cond_and(cond_tab->select_cond,
+ tmp);
if (!cond_tab->select_cond)
DBUG_RETURN(1);
cond_tab->select_cond->quick_fix_field();
@@ -5712,7 +5707,6 @@ static void
make_join_readinfo(JOIN *join, uint options)
{
uint i;
-
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
@@ -8518,6 +8512,29 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
}
+/*
+ Set up column usage bitmaps for a temporary table
+
+ IMPLEMENTATION
+ For temporary tables, we need one bitmap with all columns set and
+ a tmp_set bitmap to be used by things like filesort.
+*/
+
+void setup_tmp_table_column_bitmaps(TABLE *table, byte *bitmaps)
+{
+ uint field_count= table->s->fields;
+ bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ bitmap_init(&table->tmp_set,
+ (my_bitmap_map*) (bitmaps+ bitmap_buffer_size(field_count)),
+ field_count, FALSE);
+ /* write_set and all_set are copies of read_set */
+ table->def_write_set= table->def_read_set;
+ table->s->all_set= table->def_read_set;
+ bitmap_set_all(&table->s->all_set);
+ table->default_column_bitmaps();
+}
+
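Companion note on sizing: the function above expects bitmaps to point at 2 * bitmap_buffer_size(field_count) bytes, one half per bitmap, which is exactly what the multi_alloc_root() calls later in this patch reserve. A hedged model of that size calculation (the real macro lives in my_bitmap.h; 32-bit bitmap words are an assumption):

#include <cstddef>

static size_t bitmap_buffer_size_sketch(unsigned bits)
{
  return ((bits + 31) / 32) * 4;     /* whole 32-bit words, in bytes */
}

static size_t tmp_table_bitmaps_bytes(unsigned field_count)
{
  return bitmap_buffer_size_sketch(field_count) * 2;  /* def_read_set + tmp_set */
}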
/*
Create a temp table according to a field list.
@@ -8572,7 +8589,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
bool use_packed_rows= 0;
bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
char *tmpname,path[FN_REFLEN];
- byte *pos,*group_buff;
+ byte *pos, *group_buff, *bitmaps;
uchar *null_flags;
Field **reg_field, **from_field;
uint *blob_field;
@@ -8584,9 +8601,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
uint total_uneven_bit_length= 0;
bool force_copy_fields= param->force_copy_fields;
DBUG_ENTER("create_tmp_table");
- DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
- (int) distinct, (int) save_sum_fields,
- (ulong) rows_limit,test(group)));
+ DBUG_PRINT("enter",
+ ("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
+ (int) distinct, (int) save_sum_fields,
+ (ulong) rows_limit,test(group)));
statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status);
@@ -8653,8 +8671,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
- &group_buff, group && ! using_unique_constraint ?
- param->group_length : 0,
+ &group_buff, (group && ! using_unique_constraint ?
+ param->group_length : 0),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
NullS))
{
bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
@@ -8755,7 +8774,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
if (new_field->type() == FIELD_TYPE_BIT)
total_uneven_bit_length+= new_field->field_length & 7;
- new_field->field_index= (uint) (reg_field - table->field);
*(reg_field++)= new_field;
if (new_field->real_type() == MYSQL_TYPE_STRING ||
new_field->real_type() == MYSQL_TYPE_VARCHAR)
@@ -8775,8 +8793,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
(*argp)->maybe_null=1;
}
- new_field->query_id= thd->query_id;
- new_field->fieldnr= ++fieldnr;
+ new_field->field_index= fieldnr++;
}
}
}
@@ -8820,7 +8837,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
total_uneven_bit_length+= new_field->field_length & 7;
if (new_field->flags & BLOB_FLAG)
{
- *blob_field++= (uint) (reg_field - table->field);
+ *blob_field++= fieldnr;
blob_count++;
}
if (item->marker == 4 && item->maybe_null)
@@ -8828,10 +8845,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
group_null_items++;
new_field->flags|= GROUP_FLAG;
}
- new_field->query_id= thd->query_id;
- new_field->fieldnr= ++fieldnr;
- new_field->field_index= (uint) (reg_field - table->field);
- *(reg_field++) =new_field;
+ new_field->field_index= fieldnr++;
+ *(reg_field++)= new_field;
}
if (!--hidden_field_count)
{
@@ -8844,12 +8859,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
We need to update hidden_field_count as we may have stored group
functions with constant arguments
*/
- param->hidden_field_count= (uint) (reg_field - table->field);
+ param->hidden_field_count= fieldnr;
null_count= 0;
}
}
+ DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
- field_count= (uint) (reg_field - table->field);
+ field_count= fieldnr;
*blob_field= 0; // End marker
share->fields= field_count;
@@ -8873,6 +8889,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!table->file)
goto err;
+
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
@@ -8910,6 +8927,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
copy_func[0]=0; // End marker
+ setup_tmp_table_column_bitmaps(table, bitmaps);
+
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
pos=table->record[0]+ null_pack_length;
@@ -9146,8 +9165,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (open_tmp_table(table))
goto err;
- table->file->ha_set_all_bits_in_read_set();
- table->file->ha_set_all_bits_in_write_set();
thd->mem_root= mem_root_save;
DBUG_RETURN(table);
@@ -9193,22 +9210,28 @@ TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list)
uint record_length= 0;
uint null_count= 0; /* number of columns which may be null */
uint null_pack_length; /* NULL representation array length */
+ uint *blob_field;
+ byte *bitmaps;
+ TABLE *table;
TABLE_SHARE *share;
- /* Create the table and list of all fields */
- TABLE *table= (TABLE*) thd->calloc(sizeof(*table)+sizeof(*share));
- field= (Field**) thd->alloc((field_count + 1) * sizeof(Field*));
- if (!table || !field)
+
+ if (!multi_alloc_root(thd->mem_root,
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &field, (field_count + 1) * sizeof(Field*),
+ &blob_field, (field_count+1) *sizeof(uint),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
+ NullS))
return 0;
+ bzero(table, sizeof(*table));
+ bzero(share, sizeof(*share));
table->field= field;
- table->s= share= (TABLE_SHARE*) (table+1);
+ table->s= share;
+ share->blob_field= blob_field;
share->fields= field_count;
-
- if (!(share->blob_field= (uint*)thd->alloc((field_list.elements + 1) *
- sizeof(uint))))
- return 0;
-
share->blob_ptr_size= mi_portable_sizeof_char_ptr;
+ setup_tmp_table_column_bitmaps(table, bitmaps);
/* Create all fields and calculate the total length of record */
List_iterator_fast<create_field> it(field_list);
@@ -9495,8 +9518,8 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
To use start_bulk_insert() (which is new in 4.1) we need to find
all places where a corresponding end_bulk_insert() should be put.
*/
- table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
- new_table.file->start_bulk_insert(table->file->records);
+ table->file->info(HA_STATUS_VARIABLE); /* update table->file->stats.records */
+ new_table.file->start_bulk_insert(table->file->stats.records);
#else
/* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
@@ -9510,11 +9533,11 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
*/
while (!table->file->rnd_next(new_table.record[1]))
{
- if ((write_err=new_table.file->ha_write_row(new_table.record[1])))
+ if ((write_err= new_table.file->write_row(new_table.record[1])))
goto err;
}
/* copy row that filled HEAP table */
- if ((write_err=new_table.file->ha_write_row(table->record[0])))
+ if ((write_err=new_table.file->write_row(table->record[0])))
{
if (write_err != HA_ERR_FOUND_DUPP_KEY &&
write_err != HA_ERR_FOUND_DUPP_UNIQUE || !ignore_last_dupp_key_error)
@@ -9531,6 +9554,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
*table= new_table;
*table->s= share;
table->file->change_table_ptr(table, table->s);
+ table->use_all_columns();
if (save_proc_info)
thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
"Copying to tmp table on disk" : save_proc_info);
@@ -10361,7 +10385,7 @@ join_read_const(JOIN_TAB *tab)
if (table->status & STATUS_GARBAGE) // If first read
{
table->status= 0;
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
error=HA_ERR_KEY_NOT_FOUND;
else
{
@@ -10434,7 +10458,7 @@ join_read_always_key(JOIN_TAB *tab)
{
table->file->ha_index_init(tab->ref.key, tab->sorted);
}
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
tab->ref.key_buff,
@@ -10461,7 +10485,7 @@ join_read_last_key(JOIN_TAB *tab)
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key, tab->sorted);
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
tab->ref.key_buff,
@@ -10635,8 +10659,9 @@ join_ft_read_first(JOIN_TAB *tab)
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key, 1);
#if NOT_USED_YET
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
- return -1; // see also FT_SELECT::init()
+ /* as ft-key doesn't use store_key's, see also FT_SELECT::init() */
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
+ return -1;
#endif
table->file->ft_init();
@@ -10743,7 +10768,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
&& !join->send_group_parts && !join->having && !jt->select_cond &&
!(jt->select && jt->select->quick) &&
- !(jt->table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
(jt->ref.key < 0))
{
/* Join over all rows in table; Return number of found rows */
@@ -10759,7 +10784,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
else
{
table->file->info(HA_STATUS_VARIABLE);
- join->send_records = table->file->records;
+ join->send_records= table->file->stats.records;
}
}
else
@@ -10935,7 +10960,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
join->found_records++;
- if ((error=table->file->ha_write_row(table->record[0])))
+ if ((error=table->file->write_row(table->record[0])))
{
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)
@@ -10997,8 +11022,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -11021,7 +11046,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
init_tmptable_sum_functions(join->sum_funcs);
copy_funcs(join->tmp_table_param.items_to_copy);
- if ((error=table->file->ha_write_row(table->record[0])))
+ if ((error=table->file->write_row(table->record[0])))
{
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error, 0))
@@ -11057,7 +11082,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_fields(&join->tmp_table_param); // Groups are copied twice.
copy_funcs(join->tmp_table_param.items_to_copy);
- if (!(error=table->file->ha_write_row(table->record[0])))
+ if (!(error=table->file->write_row(table->record[0])))
join->send_records++; // New group
else
{
@@ -11066,15 +11091,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
- if (table->file->rnd_pos(table->record[1],table->file->dupp_ref))
+ if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -11117,7 +11142,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->sum_funcs_end[send_group_parts]);
if (!join->having || join->having->val_int())
{
- int error= table->file->ha_write_row(table->record[0]);
+ int error= table->file->write_row(table->record[0]);
if (error && create_myisam_from_heap(join->thd, table,
&join->tmp_table_param,
error, 0))
@@ -11614,6 +11639,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (!select->quick->reverse_sorted())
{
+ QUICK_SELECT_DESC *tmp;
int quick_type= select->quick->get_type();
if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
@@ -11622,8 +11648,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
DBUG_RETURN(0); // Use filesort
/* ORDER BY range_key DESC */
- QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
- used_key_parts);
+ tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
+ used_key_parts);
if (!tmp || tmp->error)
{
delete tmp;
@@ -11663,7 +11689,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
resolved with a key; This is because filesort() is usually faster than
retrieving all rows through an index.
*/
- if (select_limit >= table->file->records)
+ if (select_limit >= table->file->stats.records)
{
keys= *table->file->keys_to_use_for_scanning();
keys.merge(table->used_keys);
@@ -11804,7 +11830,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
table->sort.found_records=filesort(thd, table,sortorder, length,
- select, filesort_limit, &examined_rows);
+ select, filesort_limit, 0,
+ &examined_rows);
tab->records= table->sort.found_records; // For SQL_CALC_ROWS
if (select)
{
@@ -11938,7 +11965,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
entry->file->info(HA_STATUS_VARIABLE);
if (entry->s->db_type == &heap_hton ||
(!entry->s->blob_fields &&
- ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records <
+ ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
thd->variables.sortbuff_size)))
error=remove_dup_with_hash_index(join->thd, entry,
field_count, first_field,
@@ -11985,7 +12012,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (having && !having->val_int())
{
- if ((error=file->ha_delete_row(record)))
+ if ((error=file->delete_row(record)))
goto err;
error=file->rnd_next(record);
continue;
@@ -12012,7 +12039,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (compare_record(table, first_field) == 0)
{
- if ((error=file->ha_delete_row(record)))
+ if ((error=file->delete_row(record)))
goto err;
}
else if (!found)
@@ -12059,7 +12086,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (!my_multi_malloc(MYF(MY_WME),
&key_buffer,
(uint) ((key_length + extra_length) *
- (long) file->records),
+ (long) file->stats.records),
&field_lengths,
(uint) (field_count*sizeof(*field_lengths)),
NullS))
@@ -12081,7 +12108,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
extra_length= ALIGN_SIZE(key_length)-key_length;
}
- if (hash_init(&hash, &my_charset_bin, (uint) file->records, 0,
+ if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
key_length, (hash_get_key) 0, 0, 0))
{
my_free((char*) key_buffer,MYF(0));
@@ -12109,7 +12136,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
}
if (having && !having->val_int())
{
- if ((error=file->ha_delete_row(record)))
+ if ((error=file->delete_row(record)))
goto err;
continue;
}
@@ -12126,7 +12153,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (hash_search(&hash, org_key_pos, key_length))
{
/* Duplicate found; remove the row */
- if ((error=file->ha_delete_row(record)))
+ if ((error=file->delete_row(record)))
goto err;
}
else
@@ -12229,14 +12256,14 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
for (i=0 ; i < table_count ; i++)
{
uint null_fields=0,used_fields;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= tables[i].table->read_set;
for (f_ptr=tables[i].table->field,used_fields=tables[i].used_fields ;
used_fields ;
f_ptr++)
{
field= *f_ptr;
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
used_fields--;
length+=field->fill_cache_field(copy);
@@ -12435,7 +12462,8 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
{
memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length);
}
- if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, &tab->ref)) ||
+ if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, tab->table,
+ &tab->ref)) ||
diff)
return 1;
return memcmp(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length)
@@ -12444,20 +12472,24 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
bool
-cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
+cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ bool result= 0;
+
for (store_key **copy=ref->key_copy ; *copy ; copy++)
{
if ((*copy)->copy() & 1)
{
- thd->count_cuted_fields= save_count_cuted_fields;
- return 1; // Something went wrong
+ result= 1;
+ break;
}
}
thd->count_cuted_fields= save_count_cuted_fields;
- return 0;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ return result;
}
@@ -12741,11 +12773,11 @@ setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_field)
{
Item **item;
- DBUG_ENTER("setup_new_fields");
-
- thd->set_query_id=1; // Not really needed, but...
uint counter;
bool not_used;
+ DBUG_ENTER("setup_new_fields");
+
+ thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but...
for (; new_field ; new_field= new_field->next)
{
if ((item= find_item_in_list(*new_field->item, fields, &counter,
@@ -13962,7 +13994,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table)
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
- if ((error= table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->write_row(table->record[0])))
{
if (create_myisam_from_heap(thd, table, &tmp_table_param,
error, 0))
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 459d2ff89a8..6292977c209 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -56,16 +56,16 @@ typedef struct st_table_ref
*/
key_part_map null_rejecting;
table_map depend_map; // Table depends on these tables.
- byte *null_ref_key; // null byte position in the key_buf.
- // used for REF_OR_NULL optimization.
+ /* null byte position in the key_buf. Used for REF_OR_NULL optimization */
+ byte *null_ref_key;
} TABLE_REF;
+
/*
-** CACHE_FIELD and JOIN_CACHE is used on full join to cache records in outer
-** table
+ CACHE_FIELD and JOIN_CACHE are used on full join to cache records in outer
+ table
*/
-
typedef struct st_cache_field {
char *str;
uint length,blob_length;
@@ -83,7 +83,7 @@ typedef struct st_join_cache {
/*
-** The structs which holds the join connections and join states
+ The structs which hold the join connections and join states
*/
enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF,
JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL,
@@ -103,6 +103,7 @@ typedef enum_nested_loop_state
typedef int (*Read_record_func)(struct st_join_table *tab);
Next_select_func setup_end_select_func(JOIN *join);
+
typedef struct st_join_table {
st_join_table() {} /* Remove gcc warning */
TABLE *table;
@@ -482,7 +483,11 @@ class store_key_field: public store_key
}
enum store_key_result copy()
{
+ TABLE *table= copy_field.to_field->table;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
copy_field.do_copy(&copy_field);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK;
}
const char *name() const { return field_name; }
@@ -502,7 +507,11 @@ public:
{}
enum store_key_result copy()
{
+ TABLE *table= to_field->table;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
int res= item->save_in_field(to_field, 1);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res);
}
@@ -539,7 +548,7 @@ public:
const char *name() const { return "const"; }
};
-bool cp_buffer_from_ref(THD *thd, TABLE_REF *ref);
+bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref);
bool error_if_full_join(JOIN *join);
int report_error(TABLE *table, int error);
int safe_index_read(JOIN_TAB *tab);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 4071f86989f..0eb1322396a 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -704,6 +704,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
field_list.push_back(new Item_field(field));
}
restore_record(table, s->default_values); // Get empty record
+ table->use_all_columns();
if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS |
Protocol::SEND_EOF))
DBUG_VOID_RETURN;
@@ -917,9 +918,9 @@ static void append_directory(THD *thd, String *packet, const char *dir_type,
RETURN
0 OK
*/
-int
-store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
- HA_CREATE_INFO *create_info_arg)
+
+int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
+ HA_CREATE_INFO *create_info_arg)
{
List<Item> field_list;
char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, uname[NAME_LEN*3+1];
@@ -938,6 +939,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
MODE_DB2 |
MODE_MAXDB |
MODE_ANSI)) != 0;
+ my_bitmap_map *old_map;
DBUG_ENTER("store_create_info");
DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
@@ -960,6 +962,12 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
append_identifier(thd, packet, alias, strlen(alias));
packet->append(STRING_WITH_LEN(" (\n"));
+ /*
+ We need this to get default values from the table.
+ We have to restore the read_set if we are called from INSERT in case
+ of row-based replication.
+ */
+ old_map= tmp_use_all_columns(table, table->read_set);
for (ptr=table->field ; (field= *ptr); ptr++)
{
@@ -1121,10 +1129,11 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
table->field[key_part->fieldnr-1]->key_length() &&
!(key_info->flags & HA_FULLTEXT)))
{
+ char *end;
buff[0] = '(';
- char* end=int10_to_str((long) key_part->length /
- key_part->field->charset()->mbmaxlen,
- buff + 1,10);
+ end= int10_to_str((long) key_part->length /
+ key_part->field->charset()->mbmaxlen,
+ buff + 1,10);
*end++ = ')';
packet->append(buff,(uint) (end-buff));
}
@@ -1283,6 +1292,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
#endif
+ tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(0);
}
@@ -2490,8 +2500,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
int res;
/*
- Set the parent lex of 'sel' because it is needed by sel.init_query()
- which is called inside make_table_list.
+ Set the parent lex of 'sel' because it is needed by
+ sel.init_query() which is called inside make_table_list.
*/
sel.parent_lex= lex;
if (make_table_list(thd, &sel, base_name, file_name))
@@ -2686,50 +2696,55 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
case ROW_TYPE_COMPACT:
tmp_buff= "Compact";
break;
+ case ROW_TYPE_PAGES:
+ tmp_buff= "Paged";
+ break;
}
table->field[6]->store(tmp_buff, strlen(tmp_buff), cs);
if (!tables->schema_table)
{
- table->field[7]->store((longlong) file->records, TRUE);
+ table->field[7]->store((longlong) file->stats.records, TRUE);
table->field[7]->set_notnull();
}
- table->field[8]->store((longlong) file->mean_rec_length, TRUE);
- table->field[9]->store((longlong) file->data_file_length, TRUE);
- if (file->max_data_file_length)
+ table->field[8]->store((longlong) file->stats.mean_rec_length, TRUE);
+ table->field[9]->store((longlong) file->stats.data_file_length, TRUE);
+ if (file->stats.max_data_file_length)
{
- table->field[10]->store((longlong) file->max_data_file_length, TRUE);
+ table->field[10]->store((longlong) file->stats.max_data_file_length,
+ TRUE);
}
- table->field[11]->store((longlong) file->index_file_length, TRUE);
- table->field[12]->store((longlong) file->delete_length, TRUE);
+ table->field[11]->store((longlong) file->stats.index_file_length, TRUE);
+ table->field[12]->store((longlong) file->stats.delete_length, TRUE);
if (show_table->found_next_number_field)
{
- table->field[13]->store((longlong) file->auto_increment_value, TRUE);
+ table->field[13]->store((longlong) file->stats.auto_increment_value,
+ TRUE);
table->field[13]->set_notnull();
}
- if (file->create_time)
+ if (file->stats.create_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(&time,
- file->create_time);
+ file->stats.create_time);
table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[14]->set_notnull();
}
- if (file->update_time)
+ if (file->stats.update_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(&time,
- file->update_time);
+ file->stats.update_time);
table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[15]->set_notnull();
}
- if (file->check_time)
+ if (file->stats.check_time)
{
- thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time);
+ thd->variables.time_zone->gmt_sec_to_TIME(&time, file->stats.check_time);
table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[16]->set_notnull();
}
tmp_buff= (share->table_charset ?
share->table_charset->name : "default");
table->field[17]->store(tmp_buff, strlen(tmp_buff), cs);
- if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+ if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
{
table->field[18]->store((longlong) file->checksum(), TRUE);
table->field[18]->set_notnull();
@@ -2826,6 +2841,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
restore_record(show_table, s->default_values);
base_name_length= strlen(base_name);
file_name_length= strlen(file_name);
+ show_table->use_all_columns(); // Required for default values
for (ptr=show_table->field; (field= *ptr) ; ptr++)
{
@@ -3332,7 +3348,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
KEY *key=show_table->key_info+i;
if (key->rec_per_key[j])
{
- ha_rows records=(show_table->file->records /
+ ha_rows records=(show_table->file->stats.records /
key->rec_per_key[j]);
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
@@ -3737,7 +3753,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *table,
table->field[20]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[20]->set_notnull();
}
- if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+ if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
{
table->field[21]->store((longlong) stat_info.check_sum, TRUE);
table->field[21]->set_notnull();
@@ -4431,7 +4447,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
field_count++;
}
TMP_TABLE_PARAM *tmp_table_param =
- (TMP_TABLE_PARAM*) (thd->calloc(sizeof(TMP_TABLE_PARAM)));
+ (TMP_TABLE_PARAM*) (thd->alloc(sizeof(TMP_TABLE_PARAM)));
tmp_table_param->init();
tmp_table_param->table_charset= cs;
tmp_table_param->field_count= field_count;
@@ -4803,7 +4819,7 @@ bool get_schema_tables_result(JOIN *join)
filesort_free_buffers(table_list->table);
}
else
- table_list->table->file->records= 0;
+ table_list->table->file->stats.records= 0;
if (table_list->schema_table->fill_table(thd, table_list,
tab->select_cond))
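The long run of s/file->X/file->stats.X/ edits in this file reflects the handler statistics being gathered into a single struct. Grouping the members that appear in this patch gives roughly this shape (the grouping is inferred; field names are the ones used above):

#include <ctime>

typedef unsigned long long ulonglong;
typedef unsigned int uint;

struct ha_statistics_sketch
{
  ulonglong records;              /* exact only with HA_STATS_RECORDS_IS_EXACT */
  ulonglong mean_rec_length;
  ulonglong data_file_length;
  ulonglong max_data_file_length;
  ulonglong index_file_length;
  ulonglong delete_length;
  ulonglong auto_increment_value;
  time_t    create_time;
  time_t    update_time;
  time_t    check_time;
  uint      block_size;
};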
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index f890f504952..646acfd2908 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -75,7 +75,9 @@ uint tablename_to_filename(const char *from, char *to, uint to_length)
uint errors, length;
if (from[0] == '#' && !strncmp(from, MYSQL50_TABLE_NAME_PREFIX,
MYSQL50_TABLE_NAME_PREFIX_LENGTH))
- return my_snprintf(to, to_length, "%s", from + 9);
+ return (uint) (strmake(to, from+MYSQL50_TABLE_NAME_PREFIX_LENGTH,
+ to_length-1) - to);
length= strconvert(system_charset_info, from,
&my_charset_filename, to, to_length, &errors);
if (check_if_legal_tablename(to) &&
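strmake(dst, src, n) copies at most n characters, always NUL-terminates, and returns a pointer to dst's terminating NUL, so subtracting dst from the return value yields the copied length. A self-contained re-implementation for illustration (the real one lives in the strings library; equivalent behavior assumed):

#include <cstddef>

static char *strmake_sketch(char *dst, const char *src, size_t n)
{
  while (n-- && (*dst= *src++))     /* stop after n chars or at src's NUL */
    dst++;
  *dst= '\0';
  return dst;                       /* points at the terminating NUL */
}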
@@ -2277,7 +2279,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (sql_field->sql_type == FIELD_TYPE_BIT)
{
sql_field->pack_flag= FIELDFLAG_NUMBER;
- if (file->table_flags() & HA_CAN_BIT_FIELD)
+ if (file->ha_table_flags() & HA_CAN_BIT_FIELD)
total_uneven_bit_length+= sql_field->length & 7;
else
sql_field->pack_flag|= FIELDFLAG_TREAT_BIT_AS_CHAR;
@@ -2360,7 +2362,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (prepare_create_field(sql_field, &blob_columns,
&timestamps, &timestamps_with_niladic,
- file->table_flags()))
+ file->ha_table_flags()))
DBUG_RETURN(-1);
if (sql_field->sql_type == MYSQL_TYPE_VARCHAR)
create_info->varchar= 1;
@@ -2381,14 +2383,14 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
if (auto_increment &&
- (file->table_flags() & HA_NO_AUTO_INCREMENT))
+ (file->ha_table_flags() & HA_NO_AUTO_INCREMENT))
{
my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,
ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0));
DBUG_RETURN(-1);
}
- if (blob_columns && (file->table_flags() & HA_NO_BLOBS))
+ if (blob_columns && (file->ha_table_flags() & HA_NO_BLOBS))
{
my_message(ER_TABLE_CANT_HANDLE_BLOB, ER(ER_TABLE_CANT_HANDLE_BLOB),
MYF(0));
@@ -2545,7 +2547,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->type == Key::FULLTEXT)
{
- if (!(file->table_flags() & HA_CAN_FULLTEXT))
+ if (!(file->ha_table_flags() & HA_CAN_FULLTEXT))
{
my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT),
MYF(0));
@@ -2563,7 +2565,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
/* TODO: Add proper checks if handler supports key_type and algorithm */
if (key_info->flags & HA_SPATIAL)
{
- if (!(file->table_flags() & HA_CAN_RTREEKEYS))
+ if (!(file->ha_table_flags() & HA_CAN_RTREEKEYS))
{
my_message(ER_TABLE_CANT_HANDLE_SPKEYS, ER(ER_TABLE_CANT_HANDLE_SPKEYS),
MYF(0));
@@ -2665,7 +2667,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag) ||
(f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL))
{
- if (!(file->table_flags() & HA_CAN_INDEX_BLOBS))
+ if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
{
my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name);
DBUG_RETURN(-1);
@@ -2702,22 +2704,24 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
null_fields--;
}
else
- key_info->flags|= HA_NULL_PART_KEY;
- if (!(file->table_flags() & HA_NULL_IN_KEY))
- {
- my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
- DBUG_RETURN(-1);
- }
- if (key->type == Key::SPATIAL)
- {
- my_message(ER_SPATIAL_CANT_HAVE_NULL,
- ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0));
- DBUG_RETURN(-1);
- }
+ {
+ key_info->flags|= HA_NULL_PART_KEY;
+ if (!(file->ha_table_flags() & HA_NULL_IN_KEY))
+ {
+ my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
+ DBUG_RETURN(-1);
+ }
+ if (key->type == Key::SPATIAL)
+ {
+ my_message(ER_SPATIAL_CANT_HAVE_NULL,
+ ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0));
+ DBUG_RETURN(-1);
+ }
+ }
}
if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
{
- if (column_nr == 0 || (file->table_flags() & HA_AUTO_PART_KEY))
+ if (column_nr == 0 || (file->ha_table_flags() & HA_AUTO_PART_KEY))
auto_increment--; // Field is used
}
}
@@ -2754,14 +2758,14 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
else if (!f_is_geom(sql_field->pack_flag) &&
(column->length > length ||
((f_is_packed(sql_field->pack_flag) ||
- ((file->table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
+ ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
(key_info->flags & HA_NOSAME))) &&
column->length != length)))
{
my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0));
DBUG_RETURN(-1);
}
- else if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS))
+ else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
length=column->length;
}
else if (length == 0)
@@ -2847,7 +2851,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
key_info++;
}
if (!unique_key && !primary_key &&
- (file->table_flags() & HA_REQUIRE_PRIMARY_KEY))
+ (file->ha_table_flags() & HA_REQUIRE_PRIMARY_KEY))
{
my_message(ER_REQUIRES_PRIMARY_KEY, ER(ER_REQUIRES_PRIMARY_KEY), MYF(0));
DBUG_RETURN(-1);
@@ -3053,8 +3057,8 @@ bool mysql_create_table_internal(THD *thd,
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
alias= table_case_name(create_info, table_name);
- if (!(file=get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
- create_info->db_type)))
+ if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
+ create_info->db_type)))
{
mem_alloc_error(sizeof(handler));
DBUG_RETURN(TRUE);
@@ -3199,7 +3203,8 @@ bool mysql_create_table_internal(THD *thd,
else if (create_info->db_type != engine_type)
{
delete file;
- if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, engine_type)))
+ if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
+ engine_type)))
{
mem_alloc_error(sizeof(handler));
DBUG_RETURN(TRUE);
@@ -3554,7 +3559,7 @@ mysql_rename_table(handlerton *base,
a lowercase file name, but we leave the .frm in mixed case.
*/
if (lower_case_table_names == 2 && file &&
- !(file->table_flags() & HA_FILE_BASED))
+ !(file->ha_table_flags() & HA_FILE_BASED))
{
strmov(tmp_name, old_name);
my_casedn_str(files_charset_info, tmp_name);
@@ -4836,7 +4841,7 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
if (!(tmp= field->is_equal(new_field)))
DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
// Clear indexed marker
- field->add_index= 0;
+ field->flags&= ~FIELD_IN_ADD_INDEX;
changes|= tmp;
}
@@ -4912,7 +4917,7 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
{
// Mark field to be part of new key
field= table->field[key_part->fieldnr];
- field->add_index= 1;
+ field->flags|= FIELD_IN_ADD_INDEX;
}
DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
}
@@ -4939,7 +4944,7 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
{
// Mark field to be part of new key
field= table->field[key_part->fieldnr];
- field->add_index= 1;
+ field->flags|= FIELD_IN_ADD_INDEX;
}
DBUG_PRINT("info", ("index added: '%s'", new_key->name));
}
@@ -5022,6 +5027,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
alter_info->tablespace_op));
if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ)))
DBUG_RETURN(TRUE);
+ table->use_all_columns();
/* Check that we are not trying to rename to an existing table */
if (new_name)
@@ -5253,8 +5259,12 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
def_it.remove();
}
}
- else // This field was not dropped and not changed, add it to the list
- { // for the new table.
+ else
+ {
+ /*
+ This field was not dropped and not changed, add it to the list
+ for the new table.
+ */
create_list.push_back(def=new create_field(field,field));
alter_it.rewind(); // Change default if ALTER
Alter_column *alter;
@@ -5766,7 +5776,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->proc_info="copy to tmp table";
next_insert_id=thd->next_insert_id; // Remember for logging
copied=deleted=0;
- if (new_table && !(new_table->file->table_flags() & HA_NO_COPY_ON_ALTER))
+ if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
{
/* We don't want update TIMESTAMP fields during ALTER TABLE. */
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
@@ -6230,7 +6240,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
- to->file->start_bulk_insert(from->file->records);
+ to->file->start_bulk_insert(from->file->stats.records);
save_sql_mode= thd->variables.sql_mode;
@@ -6276,19 +6286,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
&tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
(from->sort.found_records = filesort(thd, from, sortorder, length,
- (SQL_SELECT *) 0, HA_POS_ERROR,
+ (SQL_SELECT *) 0, HA_POS_ERROR, 1,
&examined_rows)) ==
HA_POS_ERROR)
goto err;
};
- /*
- Handler must be told explicitly to retrieve all columns, because
- this function does not set field->query_id in the columns to the
- current query id
- */
- to->file->ha_set_all_bits_in_write_set();
- from->file->ha_retrieve_all_cols();
+ /* Tell the handler that we have values for all columns in the "to" table */
+ to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
@@ -6441,10 +6446,10 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
{
t->pos_in_table_list= table;
- if (t->file->table_flags() & HA_HAS_CHECKSUM &&
+ if (t->file->ha_table_flags() & HA_HAS_CHECKSUM &&
!(check_opt->flags & T_EXTEND))
protocol->store((ulonglong)t->file->checksum());
- else if (!(t->file->table_flags() & HA_HAS_CHECKSUM) &&
+ else if (!(t->file->ha_table_flags() & HA_HAS_CHECKSUM) &&
(check_opt->flags & T_QUICK))
protocol->store_null();
else
@@ -6453,11 +6458,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
ha_checksum crc= 0;
uchar null_mask=256 - (1 << t->s->last_null_bit_pos);
- /*
- Set all bits in read set and inform InnoDB that we are reading all
- fields
- */
- t->file->ha_retrieve_all_cols();
+ t->use_all_columns();
if (t->file->ha_rnd_init(1))
protocol->store_null();
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index b67c22e0588..bddfd8c1f0c 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -24,7 +24,7 @@
class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
- sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
+ sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@@ -132,6 +132,10 @@ private:
const char *db_name,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name);
+ friend void st_table::mark_columns_needed_for_insert(void);
+ friend void st_table::mark_columns_needed_for_update(void);
+ friend void st_table::mark_columns_needed_for_delete(void);
+
};
extern const LEX_STRING trg_action_time_type_names[];
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 495ffe9f5d5..4b9de6905fe 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -152,7 +152,8 @@ void udf_init()
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL,1,0);
- while (!(error = read_record_info.read_record(&read_record_info)))
+ table->use_all_columns();
+ while (!(error= read_record_info.read_record(&read_record_info)))
{
DBUG_PRINT("info",("init udf record"));
LEX_STRING name;
@@ -449,7 +450,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
/* Allow creation of functions even if we can't open func table */
if (!(table = open_ltable(thd,&tables,TL_WRITE)))
goto err;
-
+ table->use_all_columns();
restore_record(table, s->default_values); // Default values for fields
table->field[0]->store(u_d->name.str, u_d->name.length, system_charset_info);
table->field[1]->store((longlong) u_d->returns, TRUE);
@@ -507,8 +508,8 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
tables.table_name= tables.alias= (char*) "func";
if (!(table = open_ltable(thd,&tables,TL_WRITE)))
goto err;
+ table->use_all_columns();
table->field[0]->store(udf_name->str, udf_name->length, system_charset_info);
- table->file->ha_retrieve_all_cols();
if (!table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,
table->key_info[0].key_length,
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 3c156acfc14..bf93f0d3bea 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -152,8 +152,8 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd)
order;
order=order->next)
{
- (*order->item)->walk(&Item::change_context_processor,
- (byte *) &fake_select_lex->context);
+ (*order->item)->walk(&Item::change_context_processor, 0,
+ (byte*) &fake_select_lex->context);
}
}
@@ -468,7 +468,7 @@ bool st_select_lex_unit::exec()
}
if (!res)
{
- records_at_start= table->file->records;
+ records_at_start= table->file->stats.records;
sl->join->exec();
if (sl == union_distinct)
{
@@ -507,7 +507,7 @@ bool st_select_lex_unit::exec()
rows and actual rows added to the temporary table.
*/
add_rows+= (ulonglong) (thd->limit_found_rows - (ulonglong)
- ((table->file->records - records_at_start)));
+ ((table->file->stats.records - records_at_start)));
}
}
}
@@ -567,7 +567,7 @@ bool st_select_lex_unit::exec()
fake_select_lex->table_list.empty();
if (!res)
{
- thd->limit_found_rows = (ulonglong)table->file->records + add_rows;
+ thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows;
thd->examined_row_count+= examined_rows;
}
/*
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 75e8db6621f..8846e70bdbc 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -29,7 +29,7 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
/* Return 0 if row hasn't changed */
-static bool compare_record(TABLE *table, query_id_t query_id)
+static bool compare_record(TABLE *table)
{
if (table->s->blob_fields + table->s->varchar_fields == 0)
return cmp_record(table,record[1]);
@@ -39,9 +39,9 @@ static bool compare_record(TABLE *table, query_id_t query_id)
table->s->null_bytes))
return TRUE; // Diff in NULL value
/* Compare updated fields */
- for (Field **ptr=table->field ; *ptr ; ptr++)
+ for (Field **ptr= table->field ; *ptr ; ptr++)
{
- if ((*ptr)->query_id == query_id &&
+ if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
(*ptr)->cmp_binary_offset(table->s->rec_buff_length))
return TRUE;
}
@@ -120,6 +120,7 @@ int mysql_update(THD *thd,
bool using_limit= limit != HA_POS_ERROR;
bool safe_update= thd->options & OPTION_SAFE_UPDATES;
bool used_key_is_modified, transactional_table, will_batch;
+ bool can_compare_record;
int res;
int error, loc_error;
uint used_index= MAX_KEY, dup_key_found;
@@ -128,7 +129,6 @@ int mysql_update(THD *thd,
uint want_privilege;
#endif
uint table_count= 0;
- query_id_t query_id=thd->query_id, timestamp_query_id;
ha_rows updated, found;
key_map old_used_keys;
TABLE *table;
@@ -138,8 +138,6 @@ int mysql_update(THD *thd,
bool need_reopen;
DBUG_ENTER("mysql_update");
- LINT_INIT(timestamp_query_id);
-
for ( ; ; )
{
if (open_tables(thd, &table_list, &table_count, 0))
@@ -181,26 +179,12 @@ int mysql_update(THD *thd,
DBUG_RETURN(1);
old_used_keys= table->used_keys; // Keys used in WHERE
- /*
- Change the query_id for the timestamp column so that we can
- check if this is modified directly
- */
- if (table->timestamp_field)
- {
- timestamp_query_id=table->timestamp_field->query_id;
- table->timestamp_field->query_id=thd->query_id-1;
- }
-
/* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
table_list->register_want_access(want_privilege);
#endif
- /*
- Indicate that the set of fields is to be updated by passing 2 for
- set_query_id.
- */
- if (setup_fields_with_no_wrap(thd, 0, fields, 2, 0, 0))
+ if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(1); /* purecov: inspected */
if (table_list->view && check_fields(thd, fields))
{
@@ -214,12 +198,13 @@ int mysql_update(THD *thd,
if (table->timestamp_field)
{
// Don't set timestamp column if this is modified
- if (table->timestamp_field->query_id == thd->query_id)
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
else
{
- table->timestamp_field->query_id=timestamp_query_id;
- table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
}
}
@@ -228,7 +213,7 @@ int mysql_update(THD *thd,
table_list->grant.want_privilege= table->grant.want_privilege=
(SELECT_ACL & ~table->grant.privilege);
#endif
- if (setup_fields(thd, 0, values, 1, 0, 0))
+ if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
{
free_underlaid_joins(thd, select_lex);
DBUG_RETURN(1); /* purecov: inspected */
@@ -252,7 +237,7 @@ int mysql_update(THD *thd,
DBUG_RETURN(0);
}
#endif
- /* Update the table->file->records number */
+ /* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
select= make_select(table, 0, 0, conds, 0, &error);
@@ -313,19 +298,23 @@ int mysql_update(THD *thd,
We can't update table directly; We must first search after all
matching rows before updating the table!
*/
- table->file->ha_retrieve_all_cols();
if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
{
table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
+ table->mark_columns_used_by_index(used_index);
+ }
+ else
+ {
+ table->use_all_columns();
}
- /* note: can actually avoid sorting below.. */
+ /* note: We can avoid sorting if we sort on the used index */
if (order && (need_sort || used_key_is_modified))
{
/*
Doing an ORDER BY; Let filesort find and sort the rows we are going
to update
+ NOTE: filesort will call table->prepare_for_position()
*/
uint length;
SORT_FIELD *sortorder;
@@ -334,12 +323,11 @@ int mysql_update(THD *thd,
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (!(sortorder=make_unireg_sortorder(order, &length)) ||
- (table->sort.found_records = filesort(thd, table, sortorder, length,
- select, limit,
- &examined_rows))
+ (table->sort.found_records= filesort(thd, table, sortorder, length,
+ select, limit, 1,
+ &examined_rows))
== HA_POS_ERROR)
{
- free_io_cache(table);
goto err;
}
/*
@@ -365,6 +353,7 @@ int mysql_update(THD *thd,
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
goto err;
+ table->file->try_semi_consistent_read(1);
/*
When we get here, we have one of the following options:
@@ -376,11 +365,6 @@ int mysql_update(THD *thd,
B.2 quick select is not used, this is full index scan (with LIMIT)
Full index scan must be started with init_read_record_idx
*/
- /* If quick select is used, initialize it before retrieving rows. */
- if (select && select->quick && select->quick->reset())
- goto err;
-
- table->file->try_semi_consistent_read(1);
if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
@@ -440,17 +424,14 @@ int mysql_update(THD *thd,
goto err;
}
if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->restore_column_maps_after_mark_index();
}
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (select && select->quick && select->quick->reset())
- goto err;
+ goto err;
table->file->try_semi_consistent_read(1);
init_read_record(&info,thd,table,select,0,1);
@@ -458,7 +439,6 @@ int mysql_update(THD *thd,
thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */
thd->cuted_fields=0L;
thd->proc_info="Updating";
- query_id=thd->query_id;
transactional_table= table->file->has_transactions();
thd->no_trans_update= 0;
@@ -468,12 +448,23 @@ int mysql_update(THD *thd,
MODE_STRICT_ALL_TABLES)));
will_batch= !table->file->start_bulk_update();
+ table->mark_columns_needed_for_update();
+
+ /*
+ We can use compare_record() to optimize away updates if
+ the table handler is returning all columns OR if
+ all updated columns are read
+ */
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set, table->read_set));
+
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
{
if (table->file->was_semi_consistent_read())
- continue; /* repeat the read of the same row if it still exists */
+ continue; /* repeat the read of the same row if it still exists */
store_record(table,record[1]);
if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
@@ -483,7 +474,7 @@ int mysql_update(THD *thd,
found++;
- if (compare_record(table, query_id))
+ if (!can_compare_record || compare_record(table))
{
if ((res= table_list->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
@@ -632,7 +623,6 @@ int mysql_update(THD *thd,
table->file->end_bulk_update();
table->file->try_semi_consistent_read(0);
end_read_record(&info);
- free_io_cache(table); // If ORDER BY
delete select;
thd->proc_info= "end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
@@ -698,7 +688,6 @@ int mysql_update(THD *thd,
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
thd->abort_on_warning= 0;
- free_io_cache(table);
DBUG_RETURN((error >= 0 || thd->net.report_error) ? 1 : 0);
err:
@@ -749,8 +738,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
thd->lex->allow_sum_func= 0;
if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
- table_list, conds, &select_lex->leaf_tables,
- FALSE) ||
+ table_list, &select_lex->leaf_tables, FALSE) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
select_lex->setup_ref_array(thd, order_num) ||
setup_order(thd, select_lex->ref_pointer_array,
@@ -843,11 +831,10 @@ reopen_tables:
if (setup_tables(thd, &lex->select_lex.context,
&lex->select_lex.top_join_list,
- table_list, &lex->select_lex.where,
- &lex->select_lex.leaf_tables, FALSE))
+ table_list, &lex->select_lex.leaf_tables, FALSE))
DBUG_RETURN(TRUE);
- if (setup_fields_with_no_wrap(thd, 0, *fields, 2, 0, 0))
+ if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(TRUE);
for (tl= table_list; tl ; tl= tl->next_local)
@@ -875,7 +862,8 @@ reopen_tables:
TABLE *table= tl->table;
/* Only set timestamp column if this is not modified */
if (table->timestamp_field &&
- table->timestamp_field->query_id == thd->query_id)
+ bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/* if table will be updated then check that it is unique */
@@ -887,6 +875,7 @@ reopen_tables:
DBUG_RETURN(TRUE);
}
+ table->mark_columns_needed_for_update();
DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
/*
If table will be updated we should not downgrade lock for it and
@@ -1088,7 +1077,7 @@ int multi_update::prepare(List<Item> &not_used_values,
reference tables
*/
- if (setup_fields(thd, 0, *values, 1, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(1);
/*
@@ -1207,7 +1196,9 @@ multi_update::initialize_tables(JOIN *join)
Item_field *ifield;
List<Item> temp_fields= *fields_for_table[cnt];
ORDER group;
+ TMP_TABLE_PARAM *tmp_param;
+ table->mark_columns_needed_for_update();
if (table == main_table) // First table in join
{
if (safe_update_on_fly(join->join_tab, &temp_fields))
@@ -1216,9 +1207,9 @@ multi_update::initialize_tables(JOIN *join)
continue;
}
}
+ table->prepare_for_position();
- TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
-
+ tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
@@ -1313,7 +1304,7 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
!(table->triggers &&
table->triggers->has_before_update_triggers());
/* If scanning in clustered key */
- if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
table->s->primary_key < MAX_KEY)
return !check_if_key_used(table, table->s->primary_key, *fields) &&
!(table->triggers &&
@@ -1359,6 +1350,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
TABLE *table= cur_table->table;
+ uint offset= cur_table->shared;
/*
Check if we are using outer join and we didn't find the row
or if we have already updated this row in the previous call to this
@@ -1374,10 +1366,18 @@ bool multi_update::send_data(List<Item> &not_used_values)
if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
continue;
- uint offset= cur_table->shared;
- table->file->position(table->record[0]);
+ /*
+ We can use compare_record() to optimize away updates if
+ the table handler is returning all columns OR if
+ all updated columns are read
+ */
if (table == table_to_update)
{
+ bool can_compare_record;
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set,
+ table->read_set));
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1387,7 +1387,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
DBUG_RETURN(1);
found++;
- if (compare_record(table, thd->query_id))
+ if (!can_compare_record || compare_record(table))
{
int error;
if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1439,6 +1439,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
{
int error;
TABLE *tmp_table= tmp_tables[offset];
+ table->file->position(table->record[0]);
fill_record(thd, tmp_table->field+1, *values_for_table[offset], 1);
/* Store pointer to row */
memcpy((char*) tmp_table->field[0]->ptr,
@@ -1504,6 +1505,7 @@ int multi_update::do_updates(bool from_send_error)
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
byte *ref_pos;
+ bool can_compare_record;
table = cur_table->table;
if (table == table_to_update)
@@ -1530,6 +1532,11 @@ int multi_update::do_updates(bool from_send_error)
if ((local_error = tmp_table->file->ha_rnd_init(1)))
goto err;
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set,
+ table->read_set));
+
ref_pos= (byte*) tmp_table->field[0]->ptr;
for (;;)
{
@@ -1559,7 +1566,7 @@ int multi_update::do_updates(bool from_send_error)
TRG_ACTION_BEFORE, TRUE))
goto err2;
- if (compare_record(table, thd->query_id))
+ if (!can_compare_record || compare_record(table))
{
if ((local_error=table->file->ha_update_row(table->record[1],
table->record[0])))
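Taken together, the sql_update.cc hunks above replace the old query_id-based column tracking with a bitmap test, and guard the skip-unchanged-row optimization: compare_record() is only trustworthy when every updated column was also read. A condensed sketch of the resulting control flow, assembled from the hunks rather than new logic:

    bool can_compare_record=
      (!(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) ||
       bitmap_is_subset(table->write_set, table->read_set));

    while (!(error= info.read_record(&info)))
    {
      store_record(table, record[1]);   // save the old row for comparison
      /* fill new values, run BEFORE UPDATE triggers ... */
      if (!can_compare_record || compare_record(table))
        table->file->ha_update_row(table->record[1], table->record[0]);
    }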
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 8e5a776950d..c0096bc292d 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1343,8 +1343,8 @@ frm_type_enum mysql_frm_type(THD *thd, char *path, enum legacy_db_type *dbt)
view view for check with opened table
DESCRIPTION
- If it is VIEW and query have LIMIT clause then check that undertlying
- table of viey contain one of following:
+ If it is a VIEW and the query has a LIMIT clause then check that the
+ underlying table of the view contains one of the following:
1) primary key of underlying table
2) unique key underlying table with fields for which NULL value is
impossible
@@ -1385,19 +1385,19 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
this operation should not have influence on Field::query_id, to avoid
marking as used fields which are not used
*/
- bool save_set_query_id= thd->set_query_id;
- thd->set_query_id= 0;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
for (Field_translator *fld= trans; fld < end_of_trans; fld++)
{
if (!fld->item->fixed && fld->item->fix_fields(thd, &fld->item))
{
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
return TRUE;
}
}
- thd->set_query_id= save_set_query_id;
- DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
}
/* Loop over all keys to see if a unique-not-null key is used */
for (;key_info != key_info_end ; key_info++)
@@ -1550,7 +1550,6 @@ mysql_rename_view(THD *thd,
File_parser *parser;
char view_path[FN_REFLEN];
bool error= TRUE;
-
DBUG_ENTER("mysql_rename_view");
strxnmov(view_path, FN_REFLEN-1, mysql_data_home, "/", view->db, "/",
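The check_key_in_view() hunk above shows the replacement for the old thd->set_query_id toggle: a three-valued thd->mark_used_columns that fix_fields() consults when deciding whether to mark columns as used. The save/restore idiom, reduced to its core (names from the hunk; surrounding code assumed):

    enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
    thd->mark_used_columns= MARK_COLUMNS_NONE;       // fix fields without marking
    bool failed= item->fix_fields(thd, &item);
    thd->mark_used_columns= save_mark_used_columns;  // restore on all paths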
diff --git a/sql/table.cc b/sql/table.cc
index bacb703a28c..4309695b458 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -18,7 +18,7 @@
/* Some general useful functions */
#include "mysql_priv.h"
-#include <errno.h>
+#include "sql_trigger.h"
#include <m_ctype.h>
#include "md5.h"
@@ -93,21 +93,16 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
{
MEM_ROOT mem_root;
TABLE_SHARE *share;
- char path[FN_REFLEN], normalized_path[FN_REFLEN];
- uint path_length, normalized_length;
+ char path[FN_REFLEN];
+ uint path_length;
path_length= build_table_filename(path, sizeof(path) - 1,
table_list->db,
table_list->table_name, "");
- normalized_length= build_table_filename(normalized_path,
- sizeof(normalized_path) - 1,
- table_list->db,
- table_list->table_name, "");
-
init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
if ((share= (TABLE_SHARE*) alloc_root(&mem_root,
sizeof(*share) + key_length +
- path_length + normalized_length +2)))
+ path_length +1)))
{
bzero((char*) share, sizeof(*share));
share->table_cache_key.str= (char*) (share+1);
@@ -123,9 +118,8 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
share->path.str= share->table_cache_key.str+ key_length;
share->path.length= path_length;
strmov(share->path.str, path);
- share->normalized_path.str= share->path.str+ path_length+1;
- share->normalized_path.length= normalized_length;
- strmov(share->normalized_path.str, normalized_path);
+ share->normalized_path.str= share->path.str;
+ share->normalized_path.length= path_length;
share->version= refresh_version;
share->flush_version= flush_version;
@@ -434,6 +428,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
Field **field_ptr, *reg_field;
const char **interval_array;
enum legacy_db_type legacy_db_type;
+ my_bitmap_map *bitmaps;
DBUG_ENTER("open_binary_frm");
new_field_pack_flag= head[27];
@@ -977,7 +972,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
goto err; /* purecov: inspected */
}
- reg_field->fieldnr= i+1; //Set field number
reg_field->field_index= i;
reg_field->comment=comment;
if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
@@ -1012,7 +1006,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
uint primary_key=(uint) (find_type((char*) primary_key_name,
&share->keynames, 3) - 1);
- uint ha_option= handler_file->table_flags();
+ uint ha_option= handler_file->ha_table_flags();
keyinfo= share->key_info;
key_part= keyinfo->key_part;
@@ -1101,6 +1095,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
share->keys_for_keyread.set_bit(key);
field->part_of_key.set_bit(key);
+ field->part_of_key_not_clustered.set_bit(key);
}
if (handler_file->index_flags(key, i, 1) & HA_READ_ORDER)
field->part_of_sortkey.set_bit(key);
@@ -1258,6 +1253,14 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
share->last_null_bit_pos= null_bit_pos;
share->db_low_byte_first= handler_file->low_byte_first();
+ share->column_bitmap_size= bitmap_buffer_size(share->fields);
+
+ if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
+ share->column_bitmap_size)))
+ goto err;
+ bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
+ bitmap_set_all(&share->all_set);
+
delete handler_file;
#ifndef DBUG_OFF
if (use_hash)
@@ -1309,18 +1312,15 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
TABLE *outparam, bool is_create_table)
{
int error;
- uint records, i;
+ uint records, i, bitmap_size;
bool error_reported= FALSE;
- byte *record;
+ byte *record, *bitmaps;
Field **field_ptr;
- MEM_ROOT **root_ptr, *old_root;
DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
share->table_name.str, outparam));
error= 1;
- root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
- old_root= *root_ptr;
bzero((char*) outparam, sizeof(*outparam));
outparam->in_use= thd;
outparam->s= share;
@@ -1328,7 +1328,6 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
outparam->write_row_record= NULL;
init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
- *root_ptr= &outparam->mem_root;
if (!(outparam->alias= my_strdup(alias, MYF(MY_WME))))
goto err;
@@ -1460,23 +1459,39 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (share->partition_info_len)
{
- if (mysql_unpack_partition(thd, share->partition_info,
- share->partition_info_len,
- (uchar*)share->part_state,
- share->part_state_len,
- outparam, is_create_table,
- share->default_part_db_type))
- goto err;
- /*
- Fix the partition functions and ensure they are not constant
- functions
- */
- if (fix_partition_func(thd, share->normalized_path.str, outparam,
- is_create_table))
+ MEM_ROOT **root_ptr, *old_root;
+ bool tmp;
+ root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ old_root= *root_ptr;
+ *root_ptr= &outparam->mem_root;
+
+ tmp= (mysql_unpack_partition(thd, share->partition_info,
+ share->partition_info_len,
+ (uchar*)share->part_state,
+ share->part_state_len,
+ outparam, is_create_table,
+ share->default_part_db_type) ||
+ fix_partition_func(thd, share->normalized_path.str, outparam,
+ is_create_table));
+ *root_ptr= old_root;
+ if (tmp)
goto err;
}
#endif
+ /* Allocate bitmaps */
+
+ bitmap_size= share->column_bitmap_size;
+ if (!(bitmaps= (byte*) alloc_root(&outparam->mem_root, bitmap_size*3)))
+ goto err;
+ bitmap_init(&outparam->def_read_set,
+ (my_bitmap_map*) bitmaps, share->fields, FALSE);
+ bitmap_init(&outparam->def_write_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size), share->fields, FALSE);
+ bitmap_init(&outparam->tmp_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size*2), share->fields, FALSE);
+ outparam->default_column_bitmaps();
+
/* The table struct is now initialized; Open the table */
error= 2;
if (db_stat)
@@ -1518,13 +1533,15 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
}
}
- *root_ptr= old_root;
+#if defined(HAVE_purify) && !defined(DBUG_OFF)
+ bzero((char*) bitmaps, bitmap_size*3);
+#endif
+
thd->status_var.opened_tables++;
DBUG_RETURN (0);
err:
- *root_ptr= old_root;
if (! error_reported)
open_table_error(share, error, my_errno, 0);
delete outparam->file;
@@ -1555,6 +1572,7 @@ int closefrm(register TABLE *table, bool free_share)
uint idx;
KEY *key_info;
DBUG_ENTER("closefrm");
+ DBUG_PRINT("enter", ("table: 0x%lx", (long) table));
if (table->db_stat)
error=table->file->close();
@@ -2338,7 +2356,7 @@ table_check_intact(TABLE *table, uint table_f_count,
DBUG_PRINT("info",("last_create_time=%d", *last_create_time));
if ((fields_diff_count= (table->s->fields != table_f_count)) ||
- (*last_create_time != table->file->create_time))
+ (*last_create_time != table->file->stats.create_time))
{
DBUG_PRINT("info", ("I am suspecting, checking table"));
if (fields_diff_count)
@@ -2428,14 +2446,14 @@ table_check_intact(TABLE *table, uint table_f_count,
}
}
if (!error)
- *last_create_time= table->file->create_time;
+ *last_create_time= table->file->stats.create_time;
else if (!fields_diff_count && error_num)
my_error(error_num,MYF(0), table->alias, table_f_count, table->s->fields);
}
else
{
DBUG_PRINT("info", ("Table seems ok without thorough checking."));
- *last_create_time= table->file->create_time;
+ *last_create_time= table->file->stats.create_time;
}
DBUG_RETURN(error);
@@ -2874,7 +2892,7 @@ void st_table_list::cleanup_items()
for (Field_translator *transl= field_translation;
transl < field_translation_end;
transl++)
- transl->item->walk(&Item::cleanup_processor, 0);
+ transl->item->walk(&Item::cleanup_processor, 0, 0);
}
@@ -3424,7 +3442,7 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
field= *field_ref;
}
thd->lex->current_select->no_wrap_view_item= save_wrapper;
- if (thd->lex->current_select->no_wrap_view_item)
+ if (save_wrapper)
{
DBUG_RETURN(field);
}
@@ -3718,6 +3736,276 @@ Field_iterator_table_ref::get_natural_column_ref()
return nj_col;
}
+/*****************************************************************************
+ Functions to handle column usage bitmaps (read_set, write_set etc...)
+*****************************************************************************/
+
+/* Reset all column bitmaps */
+
+void st_table::clear_column_bitmaps()
+{
+ /*
+ Reset column read/write usage. It's identical to:
+ bitmap_clear_all(&table->def_read_set);
+ bitmap_clear_all(&table->def_write_set);
+ */
+ bzero((char*) def_read_set.bitmap, s->column_bitmap_size*2);
+ column_bitmaps_set(&def_read_set, &def_write_set);
+}
+
+
+/*
+ Tell handler we are going to call position() and rnd_pos() later.
+
+ NOTES:
+ This is needed for handlers that use the primary key to find the
+ row. In this case we have to extend the read bitmap with the primary
+ key fields.
+*/
+
+void st_table::prepare_for_position()
+{
+ DBUG_ENTER("st_table::prepare_for_position");
+
+ if ((file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ s->primary_key < MAX_KEY)
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ /* signal change */
+ file->column_bitmaps_signal();
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Mark that only fields from one key are used
+
+ NOTE:
+ This changes the bitmap to use the tmp bitmap
+ After this, you can't access any other columns in the table until
+ bitmaps are reset, for example with st_table::clear_column_bitmaps()
+ or st_table::restore_column_maps_after_mark_index()
+*/
+
+void st_table::mark_columns_used_by_index(uint index)
+{
+ MY_BITMAP *bitmap= &tmp_set;
+ DBUG_ENTER("st_table::mark_columns_used_by_index");
+
+ (void) file->extra(HA_EXTRA_KEYREAD);
+ bitmap_clear_all(bitmap);
+ mark_columns_used_by_index_no_reset(index, bitmap);
+ column_bitmaps_set(bitmap, bitmap);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Restore to use normal column maps after key read
+
+ NOTES
+ This reverses the change done by mark_columns_used_by_index()
+
+ WARNING
+ For this to work, one must have the normal table maps in place
+ when calling mark_columns_used_by_index
+*/
+
+void st_table::restore_column_maps_after_mark_index()
+{
+ DBUG_ENTER("st_table::restore_column_maps_after_mark_index");
+
+ key_read= 0;
+ (void) file->extra(HA_EXTRA_NO_KEYREAD);
+ default_column_bitmaps();
+ file->column_bitmaps_signal();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Mark columns used by the given key, but don't reset other fields
+*/
+
+void st_table::mark_columns_used_by_index_no_reset(uint index,
+ MY_BITMAP *bitmap)
+{
+ KEY_PART_INFO *key_part= key_info[index].key_part;
+ KEY_PART_INFO *key_part_end= (key_part +
+ key_info[index].key_parts);
+ for (;key_part != key_part_end; key_part++)
+ bitmap_set_bit(bitmap, key_part->fieldnr-1);
+}
+
+
+/*
+ Mark auto-increment fields as used fields in both read and write maps
+
+ NOTES
+ This is needed in insert & update as the auto-increment field is
+ always set and sometimes read.
+*/
+
+void st_table::mark_auto_increment_column()
+{
+ DBUG_ASSERT(found_next_number_field);
+ /*
+ We must set the bit in the read set as update_auto_increment() uses
+ store() to check for overflow of auto_increment values
+ */
+ bitmap_set_bit(read_set, found_next_number_field->field_index);
+ bitmap_set_bit(write_set, found_next_number_field->field_index);
+ if (s->next_number_key_offset)
+ mark_columns_used_by_index_no_reset(s->next_number_index, read_set);
+ file->column_bitmaps_signal();
+}
+
+
+/*
+ Mark columns needed for doing a delete of a row
+
+ DESCRIPTION
+ Some table engines don't have a cursor on the retrieved rows,
+ so they need either to use the primary key or all columns to
+ be able to delete a row.
+
+ If the engine needs this, the function works as follows:
+ - If a primary key exists, mark the primary key columns to be read.
+ - If not, mark all columns to be read
+
+ If the engine has HA_REQUIRES_KEY_COLUMNS_FOR_DELETE, we will
+ mark all key columns as 'to-be-read'. This allows the engine to
+ loop over the given record to find all keys and doesn't have to
+ retrieve the row again.
+*/
+
+void st_table::mark_columns_needed_for_delete()
+{
+ if (triggers)
+ {
+ if (triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
+ triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER])
+ {
+ /* TODO: optimize to only add columns used by trigger */
+ use_all_columns();
+ return;
+ }
+ }
+
+ if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
+ {
+ Field **reg_field;
+ for (reg_field= field ; *reg_field ; reg_field++)
+ {
+ if ((*reg_field)->flags & PART_KEY_FLAG)
+ bitmap_set_bit(read_set, (*reg_field)->field_index);
+ }
+ file->column_bitmaps_signal();
+ }
+ if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
+ {
+ /*
+ If the handler has no cursor capabilities, we have to read either
+ the primary key, the hidden primary key or all columns to be
+ able to do a delete
+ */
+ if (s->primary_key == MAX_KEY)
+ file->use_hidden_primary_key();
+ else
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ file->column_bitmaps_signal();
+ }
+ }
+}
+
+
+/*
+ Mark columns needed for doing an update of a row
+
+ DESCRIPTION
+ Some engines need to have all columns in an update (to be able to
+ build a complete row). If this is the case, we mark all not
+ updated columns to be read.
+
+ If this is not the case, we do as in the delete case and mark,
+ if needed, either the primary key columns or all columns to be read.
+ (see mark_columns_needed_for_delete() for details)
+
+ If the engine has HA_REQUIRES_KEY_COLUMNS_FOR_DELETE, we will
+ mark all USED key columns as 'to-be-read'. This allows the engine to
+ loop over the given record to find all changed keys and doesn't have to
+ retrieve the row again.
+*/
+
+void st_table::mark_columns_needed_for_update()
+{
+ DBUG_ENTER("mark_columns_needed_for_update");
+ if (triggers)
+ {
+ if (triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] ||
+ triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER])
+ {
+ /* TODO: optimize to only add columns used by trigger */
+ use_all_columns();
+ DBUG_VOID_RETURN;
+ }
+ }
+ if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
+ {
+ /* Mark all used key columns for read */
+ Field **reg_field;
+ for (reg_field= field ; *reg_field ; reg_field++)
+ {
+ /* Merge keys are all keys that had a column referred to in the query */
+ if (merge_keys.is_overlapping((*reg_field)->part_of_key))
+ bitmap_set_bit(read_set, (*reg_field)->field_index);
+ }
+ file->column_bitmaps_signal();
+ }
+ if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
+ {
+ /*
+ If the handler has no cursor capabilities, we have to read either
+ the primary key, the hidden primary key or all columns to be
+ able to do an update
+ */
+ if (s->primary_key == MAX_KEY)
+ file->use_hidden_primary_key();
+ else
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ file->column_bitmaps_signal();
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Mark columns the handler needs for doing an insert
+
+ For now, this is used to mark fields used by triggers
+ as changed.
+*/
+
+void st_table::mark_columns_needed_for_insert()
+{
+ if (triggers)
+ {
+ if (triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] ||
+ triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_AFTER])
+ {
+ /* TODO: optimize to only add columns used by trigger */
+ use_all_columns();
+ return;
+ }
+ }
+ if (found_next_number_field)
+ mark_auto_increment_column();
+}
+
/*****************************************************************************
** Instansiate templates
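The new st_table helpers above are designed to be used in pairs. A sketch of how an index-only scan switches to the tmp_set bitmap and back, following the pattern from the sql_update.cc hunk earlier in this diff:

    if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
    {
      table->key_read= 1;
      table->mark_columns_used_by_index(used_index);  // maps now point at tmp_set
    }
    else
      table->use_all_columns();

    /* ... scan rows, collect positions ... */

    if (table->key_read)
      table->restore_column_maps_after_mark_index();  // back to def_read_set/def_write_set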
diff --git a/sql/table.h b/sql/table.h
index 5fd9cd28585..3058301874c 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -137,6 +137,7 @@ typedef struct st_table_share
char *comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
+ MY_BITMAP all_set;
/* A pair "database_name\0table_name\0", widely used as simply a db name */
LEX_STRING table_cache_key;
LEX_STRING db; /* Pointer to db */
@@ -181,6 +182,7 @@ typedef struct st_table_share
uint next_number_index;
uint next_number_key_offset;
uint error, open_errno, errarg; /* error from open_table_def() */
+ uint column_bitmap_size;
uchar frm_version;
bool null_field_first;
bool system; /* Set if system table (one record) */
@@ -243,7 +245,7 @@ struct st_table {
byte *write_row_record; /* Used as optimisation in
THD::write_row */
byte *insert_values; /* used by INSERT ... UPDATE */
- key_map quick_keys, used_keys, keys_in_use_for_query;
+ key_map quick_keys, used_keys, keys_in_use_for_query, merge_keys;
KEY *key_info; /* data of keys in database */
Field *next_number_field; /* Set if next_number is activated */
@@ -256,8 +258,9 @@ struct st_table {
ORDER *group;
const char *alias; /* alias or table name */
uchar *null_flags;
- MY_BITMAP *read_set;
- MY_BITMAP *write_set;
+ my_bitmap_map *bitmap_init_value;
+ MY_BITMAP def_read_set, def_write_set, tmp_set; /* containers */
+ MY_BITMAP *read_set, *write_set; /* Active column sets */
query_id_t query_id;
ha_rows quick_rows[MAX_KEY];
@@ -328,6 +331,39 @@ struct st_table {
bool fill_item_list(List<Item> *item_list) const;
void reset_item_list(List<Item> *item_list) const;
+ void clear_column_bitmaps(void);
+ void prepare_for_position(void);
+ void mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *map);
+ void mark_columns_used_by_index(uint index);
+ void restore_column_maps_after_mark_index();
+ void mark_auto_increment_column(void);
+ void mark_columns_needed_for_update(void);
+ void mark_columns_needed_for_delete(void);
+ void mark_columns_needed_for_insert(void);
+ inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
+ MY_BITMAP *write_set_arg)
+ {
+ read_set= read_set_arg;
+ write_set= write_set_arg;
+ if (file)
+ file->column_bitmaps_signal();
+ }
+ inline void column_bitmaps_set_no_signal(MY_BITMAP *read_set_arg,
+ MY_BITMAP *write_set_arg)
+ {
+ read_set= read_set_arg;
+ write_set= write_set_arg;
+ }
+ inline void use_all_columns()
+ {
+ column_bitmaps_set(&s->all_set, &s->all_set);
+ }
+ inline void default_column_bitmaps()
+ {
+ read_set= &def_read_set;
+ write_set= &def_write_set;
+ }
+
};
@@ -899,3 +935,38 @@ my_bool
table_check_intact(TABLE *table, uint table_f_count,
TABLE_FIELD_W_TYPE *table_def, time_t *last_create_time,
int error_num);
+
+static inline my_bitmap_map *tmp_use_all_columns(TABLE *table,
+ MY_BITMAP *bitmap)
+{
+ my_bitmap_map *old= bitmap->bitmap;
+ bitmap->bitmap= table->s->all_set.bitmap;
+ return old;
+}
+
+
+static inline void tmp_restore_column_map(MY_BITMAP *bitmap,
+ my_bitmap_map *old)
+{
+ bitmap->bitmap= old;
+}
+
+/* The following is only needed for debugging */
+
+static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
+ MY_BITMAP *bitmap)
+{
+#ifndef DBUG_OFF
+ return tmp_use_all_columns(table, bitmap);
+#else
+ return 0;
+#endif
+}
+
+static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
+ my_bitmap_map *old)
+{
+#ifndef DBUG_OFF
+ tmp_restore_column_map(bitmap, old);
+#endif
+}
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 228a8cd9b92..77d7efdcf7c 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1661,6 +1661,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
for MyISAM.
*/
(void)table->file->ha_index_init(0, 1);
+ table->use_all_columns();
+
tz_leapcnt= 0;
res= table->file->index_first(table->record[0]);
@@ -1804,10 +1806,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
#ifdef ABBR_ARE_USED
char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
#endif
-
DBUG_ENTER("tz_load_from_open_tables");
-
/* Prepare tz_info for loading also let us make copy of time zone name */
if (!(alloc_buff= alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
tz_name->length() + 1)))
@@ -1830,6 +1830,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
*/
table= tz_tables->table;
tz_tables= tz_tables->next_local;
+ table->use_all_columns();
table->field[0]->store(tz_name->ptr(), tz_name->length(),
&my_charset_latin1);
/*
@@ -1862,6 +1863,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
using the only index in this table).
*/
table= tz_tables->table;
+ table->use_all_columns();
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
@@ -1889,6 +1891,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
Right - using special index.
*/
table= tz_tables->table;
+ table->use_all_columns();
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
@@ -1962,6 +1965,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
in ascending order by index scan also satisfies us.
*/
table= tz_tables->table;
+ table->use_all_columns();
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
@@ -2280,14 +2284,15 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables)
RETURN VALUE
Pointer to corresponding Time_zone object. 0 - in case of bad time zone
specification or other error.
-
*/
+
Time_zone *my_tz_find_with_opening_tz_tables(THD *thd, const String *name)
{
Time_zone *tz;
DBUG_ENTER("my_tz_find_with_opening_tables");
DBUG_ASSERT(thd);
DBUG_ASSERT(thd->slave_thread); // intended for use with slave thread only
+
if (!(tz= my_tz_find(name, 0)) && time_zone_tables_exist)
{
/*
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index e39ee976eb1..970744566c9 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -139,7 +139,7 @@ static HASH archive_open_tables;
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
/* Static declarations for handerton */
-static handler *archive_create_handler(TABLE_SHARE *table);
+static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
/*
Number of rows that will force a bulk insert.
*/
@@ -189,9 +189,9 @@ handlerton archive_hton = {
};
-static handler *archive_create_handler(TABLE_SHARE *table)
+static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_archive(table);
+ return new (mem_root) ha_archive(table);
}
/*
@@ -697,8 +697,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
int error;
DBUG_ENTER("ha_archive::create");
- auto_increment_value= (create_info->auto_increment_value ?
- create_info->auto_increment_value -1 :
+ stats.auto_increment_value= (create_info->auto_increment_value ?
+ create_info->auto_increment_value -1 :
(ulonglong) 0);
if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
@@ -727,7 +727,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
}
}
- write_meta_file(create_file, 0, auto_increment_value, 0,
+ write_meta_file(create_file, 0, stats.auto_increment_value, 0,
(char *)create_info->data_file_name,
FALSE);
my_close(create_file,MYF(0));
@@ -923,7 +923,7 @@ int ha_archive::write_row(byte *buf)
else
{
if (temp_auto > share->auto_increment_value)
- auto_increment_value= share->auto_increment_value= temp_auto;
+ stats.auto_increment_value= share->auto_increment_value= temp_auto;
}
}
@@ -1058,7 +1058,7 @@ int ha_archive::rnd_init(bool scan)
{
scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
- records= 0;
+ stats.records= 0;
/*
If dirty, we lock, and then reset/flush the data.
@@ -1095,6 +1095,7 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
uint *ptr, *end;
char *last;
size_t total_blob_length= 0;
+ MY_BITMAP *read_set= table->read_set;
DBUG_ENTER("ha_archive::get_row");
read= azread(file_to_read, buf, table->s->reclength);
@@ -1120,8 +1121,9 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
ptr != end ;
ptr++)
{
- if (ha_get_bit_in_read_set(((Field_blob*) table->field[*ptr])->fieldnr))
- total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
+ if (bitmap_is_set(read_set,
+ (((Field_blob*) table->field[*ptr])->field_index)))
+ total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
}
/* Adjust our row buffer if we need be */
@@ -1136,7 +1138,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
size_t size= ((Field_blob*) table->field[*ptr])->get_length();
if (size)
{
- if (ha_get_bit_in_read_set(((Field_blob*) table->field[*ptr])->fieldnr))
+ if (bitmap_is_set(read_set,
+ ((Field_blob*) table->field[*ptr])->field_index))
{
read= azread(file_to_read, last, size);
if ((size_t) read != size)
@@ -1177,7 +1180,7 @@ int ha_archive::rnd_next(byte *buf)
if (rc != HA_ERR_END_OF_FILE)
- records++;
+ stats.records++;
DBUG_RETURN(rc);
}
@@ -1298,7 +1301,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (!rc)
{
share->rows_recorded= 0;
- auto_increment_value= share->auto_increment_value= 0;
+ stats.auto_increment_value= share->auto_increment_value= 0;
while (!(rc= get_row(&archive, buf)))
{
real_write_row(buf, &writer);
@@ -1308,7 +1311,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
ulonglong auto_value=
(ulonglong) field->val_int((char*)(buf + field->offset()));
if (share->auto_increment_value < auto_value)
- auto_increment_value= share->auto_increment_value=
+ stats.auto_increment_value= share->auto_increment_value=
auto_value;
}
share->rows_recorded++;
@@ -1423,7 +1426,7 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
ha_archive::info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
- create_info->auto_increment_value= auto_increment_value;
+ create_info->auto_increment_value= stats.auto_increment_value;
}
if (*share->real_path)
create_info->data_file_name= share->real_path;
@@ -1440,8 +1443,8 @@ void ha_archive::info(uint flag)
This should be an accurate number now, though bulk and delayed inserts can
cause the number to be inaccurate.
*/
- records= share->rows_recorded;
- deleted= 0;
+ stats.records= share->rows_recorded;
+ stats.deleted= 0;
/* Costs quite a bit more to get all information */
if (flag & HA_STATUS_TIME)
{
@@ -1449,17 +1452,17 @@ void ha_archive::info(uint flag)
VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
- mean_rec_length= table->s->reclength + buffer.alloced_length();
- data_file_length= file_stat.st_size;
- create_time= file_stat.st_ctime;
- update_time= file_stat.st_mtime;
- max_data_file_length= share->rows_recorded * mean_rec_length;
+ stats.mean_rec_length= table->s->reclength + buffer.alloced_length();
+ stats.data_file_length= file_stat.st_size;
+ stats.create_time= file_stat.st_ctime;
+ stats.update_time= file_stat.st_mtime;
+ stats.max_data_file_length= share->rows_recorded * stats.mean_rec_length;
}
- delete_length= 0;
- index_file_length=0;
+ stats.delete_length= 0;
+ stats.index_file_length=0;
if (flag & HA_STATUS_AUTO)
- auto_increment_value= share->auto_increment_value;
+ stats.auto_increment_value= share->auto_increment_value;
DBUG_VOID_RETURN;
}
@@ -1586,5 +1589,6 @@ mysql_declare_plugin(archive)
NULL, /* Plugin Init */
archive_db_done, /* Plugin Deinit */
0x0100 /* 1.0 */,
+ 0
}
mysql_declare_plugin_end;
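The get_row() hunks above show the payoff of the column bitmaps for ARCHIVE: a blob's payload is only sized and fetched when its column bit is set in read_set. The test in isolation (blob_field/blob_fields are the existing TABLE_SHARE members; sketch only):

    MY_BITMAP *read_set= table->read_set;
    for (uint *ptr= table->s->blob_field,
              *end= ptr + table->s->blob_fields; ptr != end; ptr++)
    {
      Field_blob *blob= (Field_blob*) table->field[*ptr];
      if (bitmap_is_set(read_set, blob->field_index))
        total_blob_length+= blob->get_length();  // will be read into the row buffer
    }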
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 4663531b674..f35858ff382 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -74,9 +74,9 @@ public:
const char *table_type() const { return "ARCHIVE"; }
const char *index_type(uint inx) { return "NONE"; }
const char **bas_ext() const;
- ulong table_flags() const
+ ulonglong table_flags() const
{
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_CAN_BIT_FIELD |
+ return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_CAN_BIT_FIELD |
HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index e9fd1c2319d..ffcf9536ff2 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -26,7 +26,8 @@
/* Static declarations for handlerton */
-static handler *blackhole_create_handler(TABLE_SHARE *table);
+static handler *blackhole_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
static const char blackhole_hton_name[]= "BLACKHOLE";
@@ -74,9 +75,10 @@ handlerton blackhole_hton= {
};
-static handler *blackhole_create_handler(TABLE_SHARE *table)
+static handler *blackhole_create_handler(TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
{
- return new ha_blackhole(table);
+ return new (mem_root) ha_blackhole(table);
}
@@ -171,16 +173,9 @@ void ha_blackhole::info(uint flag)
{
DBUG_ENTER("ha_blackhole::info");
- records= 0;
- deleted= 0;
- errkey= 0;
- mean_rec_length= 0;
- data_file_length= 0;
- index_file_length= 0;
- max_data_file_length= 0;
- delete_length= 0;
+ bzero((char*) &stats, sizeof(stats));
if (flag & HA_STATUS_AUTO)
- auto_increment_value= 1;
+ stats.auto_increment_value= 1;
DBUG_VOID_RETURN;
}
@@ -266,5 +261,6 @@ mysql_declare_plugin(blackhole)
NULL, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
+ 0
}
mysql_declare_plugin_end;
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index 15e12659aa0..55c26f6f02e 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -40,12 +40,11 @@ public:
*/
const char *index_type(uint key_number);
const char **bas_ext() const;
- ulong table_flags() const
+ ulonglong table_flags() const
{
return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED);
+ HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_FILE_BASED | HA_CAN_GEOMETRY | HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
@@ -84,5 +83,4 @@ public:
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
- bool has_transactions() { return 1; }
};
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index de69df90ed5..c70e21a0be7 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -74,7 +74,7 @@ static int write_meta_file(File meta_file, ha_rows rows, bool dirty);
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
-static handler *tina_create_handler(TABLE_SHARE *table);
+static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
static int tina_init_func();
static const char tina_hton_name[]= "CSV";
@@ -114,7 +114,8 @@ handlerton tina_hton= {
NULL, /* Fill FILES Table */
HTON_CAN_RECREATE,
NULL, /* binlog_func */
- NULL /* binlog_log_query */
+ NULL, /* binlog_log_query */
+ 0
};
/*****************************************************************************
@@ -478,9 +479,9 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
}
-static handler *tina_create_handler(TABLE_SHARE *table)
+static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_tina(table);
+ return new (mem_root) ha_tina(table);
}
@@ -507,8 +508,10 @@ ha_tina::ha_tina(TABLE_SHARE *table_arg)
int ha_tina::encode_quote(byte *buf)
{
char attribute_buffer[1024];
- String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin);
+ String attribute(attribute_buffer, sizeof(attribute_buffer),
+ &my_charset_bin);
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
buffer.length(0);
for (Field **field=table->field ; *field ; field++)
{
@@ -569,6 +572,7 @@ int ha_tina::encode_quote(byte *buf)
buffer.append('\n');
//buffer.replace(buffer.length(), 0, "\n", 1);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
return (buffer.length());
}
@@ -621,6 +625,7 @@ int ha_tina::find_current_row(byte *buf)
{
byte *mapped_ptr;
byte *end_ptr;
+ my_bitmap_map *org_bitmap;
DBUG_ENTER("ha_tina::find_current_row");
mapped_ptr= (byte *)share->mapped_file + current_position;
@@ -633,6 +638,9 @@ int ha_tina::find_current_row(byte *buf)
local_saved_data_file_length)) == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
+ /* Avoid asserts in ::store() for columns that are not going to be updated */
+ org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+
for (Field **field=table->field ; *field ; field++)
{
buffer.length(0);
@@ -676,11 +684,13 @@ int ha_tina::find_current_row(byte *buf)
buffer.append(*mapped_ptr);
}
}
- (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
+ if (bitmap_is_set(table->read_set, (*field)->field_index))
+ (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
}
next_position= (end_ptr - share->mapped_file)+1;
/* Maybe use \N for null? */
memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
+ tmp_restore_column_map(table->write_set, org_bitmap);
DBUG_RETURN(0);
}
@@ -898,7 +908,7 @@ int ha_tina::write_row(byte * buf)
update_status();
pthread_mutex_unlock(&share->mutex);
- records++;
+ stats.records++;
DBUG_RETURN(0);
}
@@ -957,7 +967,7 @@ int ha_tina::delete_row(const byte * buf)
if (chain_append())
DBUG_RETURN(-1);
- --records;
+ stats.records--;
/* DELETE should never happen on the log table */
DBUG_ASSERT(!share->is_log_table);
@@ -1005,7 +1015,7 @@ int ha_tina::rnd_init(bool scan)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
current_position= next_position= 0;
- records= 0;
+ stats.records= 0;
records_is_known= 0;
chain_ptr= chain;
#ifdef HAVE_MADVISE
@@ -1047,7 +1057,7 @@ int ha_tina::rnd_next(byte *buf)
if ((rc= find_current_row(buf)))
DBUG_RETURN(rc);
- records++;
+ stats.records++;
DBUG_RETURN(0);
}
@@ -1090,8 +1100,8 @@ void ha_tina::info(uint flag)
{
DBUG_ENTER("ha_tina::info");
/* This is a lie, but you don't want the optimizer to see zero or 1 */
- if (!records_is_known && records < 2)
- records= 2;
+ if (!records_is_known && stats.records < 2)
+ stats.records= 2;
DBUG_VOID_RETURN;
}
@@ -1204,6 +1214,8 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
goto end;
}
+ /* Don't assert in field::val() functions */
+ table->use_all_columns();
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -1307,7 +1319,7 @@ int ha_tina::delete_all_rows()
if (get_mmap(share, 0) > 0)
DBUG_RETURN(-1);
- records=0;
+ stats.records=0;
DBUG_RETURN(rc);
}
@@ -1412,6 +1424,7 @@ mysql_declare_plugin(csv)
tina_init_func, /* Plugin Init */
tina_done_func, /* Plugin Deinit */
0x0100 /* 1.0 */,
+ 0
}
mysql_declare_plugin_end;
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index d155a614780..d3a8c5092b6 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -87,14 +87,16 @@ public:
const char *table_type() const { return "CSV"; }
const char *index_type(uint inx) { return "NONE"; }
const char **bas_ext() const;
- ulong table_flags() const
+ ulonglong table_flags() const
{
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT |
- HA_NO_AUTO_INCREMENT );
+ return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
{
- /* We will never have indexes so this will never be called(AKA we return zero) */
+ /*
+ We will never have indexes so this will never be called(AKA we return
+ zero)
+ */
return 0;
}
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
@@ -104,7 +106,7 @@ public:
/*
Called in test_quick_select to determine if indexes should be used.
*/
- virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
/* The next method will never be called */
virtual bool fast_key_read() { return 1;}
/*
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 2ce543dfbb0..f2f4694b54e 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -72,7 +72,7 @@
#include <mysql/plugin.h>
-static handler* example_create_handler(TABLE_SHARE *table);
+static handler *example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
static int example_init_func();
static bool example_init_func_for_handlerton();
static int example_panic(enum ha_panic_function flag);
@@ -244,9 +244,9 @@ static int free_share(EXAMPLE_SHARE *share)
}
-static handler* example_create_handler(TABLE_SHARE *table)
+static handler* example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- return new ha_example(table);
+ return new (mem_root) ha_example(table);
}
@@ -756,6 +756,7 @@ mysql_declare_plugin(example)
example_init_func, /* Plugin Init */
example_done_func, /* Plugin Deinit */
0x0001 /* 0.1 */,
+ 0
}
mysql_declare_plugin_end;
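Every engine in this patch gets the same two-line change to its factory: the handler is now allocated with placement new in a caller-supplied MEM_ROOT, so its memory is released when the root is freed instead of through delete. The shape of the change, using the example engine:

    static handler *example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
    {
      return new (mem_root) ha_example(table);  // lifetime bound to mem_root
    }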
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index 139a50a3281..956dc62311c 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -62,7 +62,7 @@ public:
implements. The current table flags are documented in
handler.h
*/
- ulong table_flags() const
+ ulonglong table_flags() const
{
return 0;
}
@@ -97,7 +97,7 @@ public:
/*
Called in test_quick_select to determine if indexes should be used.
*/
- virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
/*
The next method will never be called if you do not implement indexes.
*/
diff --git a/storage/heap/hp_extra.c b/storage/heap/hp_extra.c
index dd41d6c5f19..abb632707f2 100644
--- a/storage/heap/hp_extra.c
+++ b/storage/heap/hp_extra.c
@@ -32,13 +32,8 @@ int heap_extra(register HP_INFO *info, enum ha_extra_function function)
DBUG_ENTER("heap_extra");
switch (function) {
- case HA_EXTRA_RESET:
case HA_EXTRA_RESET_STATE:
- info->lastinx= -1;
- info->current_record= (ulong) ~0L;
- info->current_hash_ptr=0;
- info->update=0;
- break;
+ heap_reset(info);
+ /* fall through */
case HA_EXTRA_NO_READCHECK:
info->opt_flag&= ~READ_CHECK_USED; /* No readcheck */
break;
@@ -56,6 +51,16 @@ int heap_extra(register HP_INFO *info, enum ha_extra_function function)
} /* heap_extra */
+int heap_reset(HP_INFO *info)
+{
+ info->lastinx= -1;
+ info->current_record= (ulong) ~0L;
+ info->current_hash_ptr=0;
+ info->update=0;
+ return 0;
+}
+
+
/*
Start/Stop Inserting Duplicates Into a Table, WL#1648.
*/
diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c
index a74872dbd11..8d2a8bc3da2 100644
--- a/storage/heap/hp_test2.c
+++ b/storage/heap/hp_test2.c
@@ -469,7 +469,7 @@ int main(int argc, char *argv[])
#endif
printf("- Read through all records with scan\n");
- if (heap_extra(file,HA_EXTRA_RESET) || heap_extra(file,HA_EXTRA_CACHE))
+ if (heap_reset(file) || heap_extra(file,HA_EXTRA_CACHE))
{
puts("got error from heap_extra");
goto end;
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
index 8d48f533203..4f58b44722d 100644
--- a/storage/myisam/ft_boolean_search.c
+++ b/storage/myisam/ft_boolean_search.c
@@ -602,8 +602,9 @@ static int ftb_phrase_add_word(void *param, char *word, int word_len,
{
FT_WORD *phrase_word= (FT_WORD *)phrase->data;
FT_WORD *document_word= (FT_WORD *)document->data;
- if (my_strnncoll(phrase_param->cs, phrase_word->pos, phrase_word->len,
- document_word->pos, document_word->len))
+ if (my_strnncoll(phrase_param->cs, (uchar*) phrase_word->pos,
+ phrase_word->len,
+ (uchar*) document_word->pos, document_word->len))
return 0;
}
phrase_param->match++;
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index 04beb36bb47..c1ed29c4734 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -47,29 +47,6 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
DBUG_PRINT("enter",("function: %d",(int) function));
switch (function) {
- case HA_EXTRA_RESET:
- /*
- Free buffers and reset the following flags:
- EXTRA_CACHE, EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK
-
- If the row buffer cache is large (for dynamic tables), reduce it
- to save memory.
- */
- if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
- {
- info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
- error=end_io_cache(&info->rec_cache);
- }
- if (share->base.blobs)
- mi_alloc_rec_buff(info, -1, &info->rec_buff);
-#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
- if (info->opt_flag & MEMMAP_USED)
- madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
-#endif
- info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
- info->quick_mode=0;
- /* Fall through */
-
case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */
info->lastinx= 0; /* Use first index as def */
info->last_search_keypage=info->lastpos= HA_OFFSET_ERROR;
@@ -425,3 +402,36 @@ static void mi_extra_keyflag(MI_INFO *info, enum ha_extra_function function)
}
}
+
+int mi_reset(MI_INFO *info)
+{
+ int error= 0;
+ MYISAM_SHARE *share=info->s;
+ DBUG_ENTER("mi_reset");
+ /*
+ Free buffers and reset the following flags:
+ EXTRA_CACHE, EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK
+
+ If the row buffer cache is large (for dynamic tables), reduce it
+ to save memory.
+ */
+ if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
+ {
+ info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
+ error= end_io_cache(&info->rec_cache);
+ }
+ if (share->base.blobs)
+ mi_alloc_rec_buff(info, -1, &info->rec_buff);
+#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
+ if (info->opt_flag & MEMMAP_USED)
+ madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
+#endif
+ info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
+ info->quick_mode=0;
+ info->lastinx= 0; /* Use first index as def */
+ info->last_search_keypage= info->lastpos= HA_OFFSET_ERROR;
+ info->page_changed= 1;
+ info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
+ HA_STATE_PREV_FOUND);
+ DBUG_RETURN(error);
+}
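
Note the ordering inside the new mi_reset(): the removed HA_EXTRA_RESET case used to fall through into HA_EXTRA_RESET_STATE, so the function inlines both halves in the same order - cache/buffer teardown first, then the cursor-state rewind (lastinx, lastpos, page_changed, update flags). A usage sketch, assuming an open MI_INFO* handle as in the test programs further below:

    #include <myisam.h>   /* MI_INFO, mi_reset(), mi_extra() */

    /* Sketch: re-prime a MyISAM handle for a cached full scan under the
       new API; mi_reset() replaces mi_extra(file, HA_EXTRA_RESET, 0). */
    static int start_cached_scan(MI_INFO *file)
    {
      if (mi_reset(file))               /* free caches, rewind cursor state */
        return 1;
      return mi_extra(file, HA_EXTRA_CACHE, 0) != 0;  /* record cache on */
    }
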
diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c
index 05f8459a4b4..2117e9fdf15 100644
--- a/storage/myisam/mi_search.c
+++ b/storage/myisam/mi_search.c
@@ -259,15 +259,16 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
{
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
- DBUG_PRINT("error",("Found wrong key: length: %u page: %lx end: %lx",
- length, (long) page, (long) end));
+ DBUG_PRINT("error",
+ ("Found wrong key: length: %u page: 0x%lx end: 0x%lx",
+ length, (long) page, (long) end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
if ((flag=ha_key_cmp(keyinfo->seg,t_buff,key,key_len,comp_flag,
not_used)) >= 0)
break;
#ifdef EXTRA_DEBUG
- DBUG_PRINT("loop",("page: %lx key: '%s' flag: %d", (long) page, t_buff,
+ DBUG_PRINT("loop",("page: 0x%lx key: '%s' flag: %d", (long) page, t_buff,
flag));
#endif
memcpy(buff,t_buff,length);
@@ -276,7 +277,7 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
if (flag == 0)
memcpy(buff,t_buff,length); /* Result is first key */
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: %lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
DBUG_RETURN(flag);
} /* _mi_seq_search */
@@ -416,8 +417,9 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
{
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
- DBUG_PRINT("error",("Found wrong key: length: %u page: %lx end: %lx",
- length, (long) page, (long) end));
+ DBUG_PRINT("error",
+ ("Found wrong key: length: %u page: 0x%lx end: %lx",
+ length, (long) page, (long) end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
@@ -551,7 +553,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: %lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
DBUG_RETURN(flag);
} /* _mi_prefix_search */
@@ -813,7 +815,7 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (length > keyseg->length)
{
DBUG_PRINT("error",
- ("Found too long null packed key: %u of %u at %lx",
+ ("Found too long null packed key: %u of %u at 0x%lx",
length, keyseg->length, (long) *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
mi_print_error(keyinfo->share, HA_ERR_CRASHED);
@@ -870,7 +872,7 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
}
if (length > (uint) keyseg->length)
{
- DBUG_PRINT("error",("Found too long packed key: %u of %u at %lx",
+ DBUG_PRINT("error",("Found too long packed key: %u of %u at 0x%lx",
length, keyseg->length, (long) *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
mi_print_error(keyinfo->share, HA_ERR_CRASHED);
@@ -936,8 +938,9 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
{
if (length > keyinfo->maxlength)
{
- DBUG_PRINT("error",("Found too long binary packed key: %u of %u at %lx",
- length, keyinfo->maxlength, (long) *page_pos));
+ DBUG_PRINT("error",
+ ("Found too long binary packed key: %u of %u at 0x%lx",
+ length, keyinfo->maxlength, (long) *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
mi_print_error(keyinfo->share, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
@@ -984,7 +987,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
length-=tmp;
from=page; from_end=page_end;
}
- DBUG_PRINT("info",("key: %lx from: %lx length: %u",
+ DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u",
(long) key, (long) from, length));
memmove((byte*) key, (byte*) from, (size_t) length);
key+=length;
@@ -1042,7 +1045,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
}
}
}
- DBUG_PRINT("exit",("page: %lx length: %u", (long) page,
+ DBUG_PRINT("exit",("page: 0x%lx length: %u", (long) page,
*return_key_length));
DBUG_RETURN(page);
} /* _mi_get_key */
@@ -1095,7 +1098,8 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
uint nod_flag;
uchar *lastpos;
DBUG_ENTER("_mi_get_last_key");
- DBUG_PRINT("enter",("page: %lx endpos: %lx", (long) page, (long) endpos));
+ DBUG_PRINT("enter",("page: 0x%lx endpos: 0x%lx", (long) page,
+ (long) endpos));
nod_flag=mi_test_if_nod(page);
if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY)))
@@ -1115,7 +1119,7 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
*return_key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&page,lastkey);
if (*return_key_length == 0)
{
- DBUG_PRINT("error",("Couldn't find last key: page: %lx",
+ DBUG_PRINT("error",("Couldn't find last key: page: 0x%lx",
(long) page));
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
@@ -1123,7 +1127,7 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
}
}
}
- DBUG_PRINT("exit",("lastpos: %lx length: %u", (long) lastpos,
+ DBUG_PRINT("exit",("lastpos: 0x%lx length: %u", (long) lastpos,
*return_key_length));
DBUG_RETURN(lastpos);
} /* _mi_get_last_key */
@@ -1660,7 +1664,7 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key,
ref_length=0;
next_length_pack=0;
}
- DBUG_PRINT("test",("length: %d next_key: %lx", length,
+ DBUG_PRINT("test",("length: %d next_key: 0x%lx", length,
(long) next_key));
{
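
The mi_search.c hunks are cosmetic: pointers in DBUG_PRINT trace lines were printed with a bare %lx, so values read ambiguously in the trace output; the explicit 0x prefix makes them self-describing. The tree's pre-C99 convention is to cast pointers to long for %lx rather than use %p; a sketch of the standardized form (trace_page is illustrative, not from the patch):

    #include <my_global.h>
    #include <my_dbug.h>

    /* Sketch: the trace convention these hunks standardize on. */
    static void trace_page(const uchar *page, uint length)
    {
      DBUG_PRINT("info", ("page: 0x%lx length: %u", (long) page, length));
    }
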
diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c
index 357ebb1b9bc..357128b7a40 100644
--- a/storage/myisam/mi_test2.c
+++ b/storage/myisam/mi_test2.c
@@ -708,7 +708,7 @@ int main(int argc, char *argv[])
if (!silent)
printf("- mi_extra(CACHE) + mi_rrnd.... + mi_extra(NO_CACHE)\n");
- if (mi_extra(file,HA_EXTRA_RESET,0) || mi_extra(file,HA_EXTRA_CACHE,0))
+ if (mi_reset(file) || mi_extra(file,HA_EXTRA_CACHE,0))
{
if (locking || (!use_blob && !pack_fields))
{
@@ -751,7 +751,7 @@ int main(int argc, char *argv[])
DBUG_PRINT("progpos",("Removing keys"));
lastpos = HA_OFFSET_ERROR;
/* DBUG_POP(); */
- mi_extra(file,HA_EXTRA_RESET,0);
+ mi_reset(file);
found_parts=0;
while ((error=mi_rrnd(file,read_record,HA_OFFSET_ERROR)) !=
HA_ERR_END_OF_FILE)
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index 5b3067cb115..556d0f46145 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -3033,7 +3033,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf)
{
isam_info= *(info->current=info->file);
info->end=info->current+info->count;
- mi_extra(isam_info, HA_EXTRA_RESET, 0);
+ mi_reset(isam_info);
mi_extra(isam_info, HA_EXTRA_CACHE, 0);
filepos=isam_info->s->pack.header_length;
}
@@ -3056,7 +3056,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf)
info->current++;
isam_info= *info->current;
filepos=isam_info->s->pack.header_length;
- mi_extra(isam_info,HA_EXTRA_RESET, 0);
+ mi_reset(isam_info);
mi_extra(isam_info,HA_EXTRA_CACHE, 0);
}
}
diff --git a/storage/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c
index 62cf5f01aba..ef7eeb9d4d9 100644
--- a/storage/myisammrg/myrg_extra.c
+++ b/storage/myisammrg/myrg_extra.c
@@ -38,10 +38,10 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function,
}
else
{
- if (function == HA_EXTRA_NO_CACHE || function == HA_EXTRA_RESET ||
- function == HA_EXTRA_PREPARE_FOR_UPDATE)
+ if (function == HA_EXTRA_NO_CACHE ||
+ function == HA_EXTRA_PREPARE_FOR_UPDATE)
info->cache_in_use=0;
- if (function == HA_EXTRA_RESET || function == HA_EXTRA_RESET_STATE)
+ if (function == HA_EXTRA_RESET_STATE)
{
info->current_table=0;
info->last_used_table=info->open_tables;
@@ -66,3 +66,23 @@ void myrg_extrafunc(MYRG_INFO *info, invalidator_by_filename inv)
DBUG_VOID_RETURN;
}
+
+
+int myrg_reset(MYRG_INFO *info)
+{
+ int save_error= 0;
+ MYRG_TABLE *file;
+ DBUG_ENTER("myrg_reset");
+
+ info->cache_in_use=0;
+ info->current_table=0;
+ info->last_used_table= info->open_tables;
+
+ for (file=info->open_tables ; file != info->end_table ; file++)
+ {
+ int error;
+ if ((error= mi_reset(file->table)))
+ save_error=error;
+ }
+ DBUG_RETURN(save_error);
+}
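
The new myrg_reset() fans the reset out to every underlying MyISAM table and deliberately keeps going when a child fails, remembering the last error instead of returning early, so no child is left un-reset. The same accumulation idiom, reduced to a standalone sketch (reset_fn and the void* children are hypothetical, for illustration only):

    /* Visit every child even when one fails; report the last failure -
       the pattern myrg_reset() applies over its MYRG_TABLE range. */
    static int reset_all(int (*reset_fn)(void *), void **children, int n)
    {
      int save_error= 0, i;
      for (i= 0; i < n; i++)
      {
        int error;
        if ((error= reset_fn(children[i])))
          save_error= error;            /* remember, but keep going */
      }
      return save_error;
    }
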
diff --git a/unittest/mysys/base64.t.c b/unittest/mysys/base64.t.c
index 1b4f2eb2356..6d85964b20d 100644
--- a/unittest/mysys/base64.t.c
+++ b/unittest/mysys/base64.t.c
@@ -15,9 +15,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA */
+#include <my_global.h>
#include <base64.h>
#include <tap.h>
-#include <stdlib.h>
#include <string.h>
int
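
The unit-test fix is about include order: my_global.h must precede every other MySQL header because it establishes the platform and configuration macros, and it already pulls in the standard C headers, which is why the direct <stdlib.h> include can be dropped. A minimal sketch, assuming the 5.1-era base64 API declared in base64.h:

    #include <my_global.h>  /* must come first: platform/config macros */
    #include <base64.h>

    /* Sketch: size a destination buffer for encoding, assuming the
       base64_needed_encoded_length() helper from base64.h. */
    static int encoded_size(int src_len)
    {
      return base64_needed_encoded_length(src_len);
    }
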