author     unknown <monty@mashka.mysql.fi>  2003-07-01 13:30:55 +0300
committer  unknown <monty@mashka.mysql.fi>  2003-07-01 13:30:55 +0300
commit     0fe578f52a7b699cc4ff726428c9701530d9f827 (patch)
tree       4ffa2312169bb369a48398d8e9ed4c6c805c3c73
parent     9e6471caf850a9c565cfde0a1545cb14ebd19bf8 (diff)
parent     bbf8ebe87d7c3d3d07f56457168e8faa943f7d64 (diff)
download   mariadb-git-0fe578f52a7b699cc4ff726428c9701530d9f827.tar.gz
Merge bk-internal.mysql.com:/home/bk/mysql-4.0
into mashka.mysql.fi:/home/my/mysql-4.0
sql/item_strfunc.cc:
Auto merged
sql/sql_select.cc:
Auto merged
38 files changed, 381 insertions, 155 deletions
diff --git a/BUILD/compile-pentium-debug-max b/BUILD/compile-pentium-debug-max index f71b849384f..1684686ce8c 100755 --- a/BUILD/compile-pentium-debug-max +++ b/BUILD/compile-pentium-debug-max @@ -3,7 +3,7 @@ path=`dirname $0` . "$path/SETUP.sh" -extra_flags="$pentium_cflags $debug_cflags -DBIG_TABLES" +extra_flags="$pentium_cflags $debug_cflags" c_warnings="$c_warnings $debug_extra_warnings" cxx_warnings="$cxx_warnings $debug_extra_warnings" extra_configs="$pentium_configs $debug_configs" diff --git a/myisam/mi_info.c b/myisam/mi_info.c index 32767e73bb1..f4eace198f9 100644 --- a/myisam/mi_info.c +++ b/myisam/mi_info.c @@ -80,15 +80,17 @@ int mi_status(MI_INFO *info, register MI_ISAMINFO *x, uint flag) (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ? 0L : share->base.pack_reclength); x->sortkey= -1; /* No clustering */ - /* The following should be included even if we are not compiling with - USE_RAID as the client must be able to request it! */ x->rec_per_key = share->state.rec_per_key_part; - x->raid_type= share->base.raid_type; - x->raid_chunks= share->base.raid_chunks; - x->raid_chunksize= share->base.raid_chunksize; x->key_map = share->state.key_map; x->data_file_name = share->data_file_name; x->index_file_name = share->index_file_name; + /* + The following should be included even if we are not compiling with + USE_RAID as the client must be able to request it! + */ + x->raid_type= share->base.raid_type; + x->raid_chunks= share->base.raid_chunks; + x->raid_chunksize= share->base.raid_chunksize; } if ((flag & HA_STATUS_TIME) && !my_fstat(info->dfile,&state,MYF(0))) x->update_time=state.st_mtime; diff --git a/mysql-test/r/convert.result b/mysql-test/r/convert.result new file mode 100644 index 00000000000..f8dad8c69ba --- /dev/null +++ b/mysql-test/r/convert.result @@ -0,0 +1,17 @@ +select @@convert_character_set; +@@convert_character_set + +select @@global.convert_character_set; +@@global.convert_character_set + +show variables like "%convert_character_set%"; +Variable_name Value +convert_character_set +SET CHARACTER SET cp1251_koi8; +select @@convert_character_set; +@@convert_character_set +cp1251_koi8 +SET CHARACTER SET DEFAULT; +select @@convert_character_set; +@@convert_character_set + diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result index 5f4f7cced1e..53a20eeea0b 100644 --- a/mysql-test/r/distinct.result +++ b/mysql-test/r/distinct.result @@ -161,7 +161,7 @@ SELECT DISTINCT UserId FROM t1 WHERE UserId=22; UserId drop table t1; CREATE TABLE t1 (a int(10) unsigned not null primary key,b int(10) unsigned); -INSERT INTO t1 VALUES (1,1),(2,1); +INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1); CREATE TABLE t2 (a int(10) unsigned not null, key (A)); INSERT INTO t2 VALUES (1),(2); CREATE TABLE t3 (a int(10) unsigned, key(A), b text); @@ -189,7 +189,7 @@ insert into t4 select * from t3; insert into t3 select * from t4; explain select distinct t1.a from t1,t3 where t1.a=t3.a; table type possible_keys key key_len ref rows Extra -t1 index PRIMARY PRIMARY 4 NULL 2 Using index; Using temporary +t1 index PRIMARY PRIMARY 4 NULL 4 Using index; Using temporary t3 ref a a 5 t1.a 10 Using where; Using index; Distinct select distinct t1.a from t1,t3 where t1.a=t3.a; a @@ -200,16 +200,16 @@ select distinct 1 from t1,t3 where t1.a=t3.a; 1 explain SELECT distinct t1.a from t1; table type possible_keys key key_len ref rows Extra -t1 index NULL PRIMARY 4 NULL 2 Using index +t1 index NULL PRIMARY 4 NULL 4 Using index explain SELECT distinct t1.a from t1 order by a desc; table type 
possible_keys key key_len ref rows Extra -t1 index NULL PRIMARY 4 NULL 2 Using index +t1 index NULL PRIMARY 4 NULL 4 Using index explain SELECT t1.a from t1 group by a order by a desc; table type possible_keys key key_len ref rows Extra -t1 index NULL PRIMARY 4 NULL 2 Using index +t1 index NULL PRIMARY 4 NULL 4 Using index explain SELECT distinct t1.a from t1 order by a desc limit 1; table type possible_keys key key_len ref rows Extra -t1 index NULL PRIMARY 4 NULL 2 Using index +t1 index NULL PRIMARY 4 NULL 4 Using index explain SELECT distinct a from t3 order by a desc limit 2; table type possible_keys key key_len ref rows Extra t3 index NULL a 5 NULL 204 Using index diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index 94e1ce59585..67e6bec09f6 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -534,11 +534,11 @@ a b explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b; table type possible_keys key key_len ref rows Extra t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort -t2 ALL a NULL NULL NULL 4 Using where +t2 ALL a NULL NULL NULL 3 Using where explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b ORDER BY NULL; table type possible_keys key key_len ref rows Extra t1 ALL NULL NULL NULL NULL 6 Using temporary -t2 ALL a NULL NULL NULL 4 Using where +t2 ALL a NULL NULL NULL 3 Using where drop table t1,t2; create table t1 (a int, b int); insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4); diff --git a/mysql-test/r/heap.result b/mysql-test/r/heap.result index d8905085e34..a04ddf3f302 100644 --- a/mysql-test/r/heap.result +++ b/mysql-test/r/heap.result @@ -166,7 +166,7 @@ alter table t1 add column new_col char(1) not null, add key (btn,new_col), drop update t1 set new_col=btn; explain select * from t1 where btn="a"; table type possible_keys key key_len ref rows Extra -t1 ALL btn NULL NULL NULL 14 Using where +t1 ALL btn NULL NULL NULL 11 Using where explain select * from t1 where btn="a" and new_col="a"; table type possible_keys key key_len ref rows Extra t1 ref btn btn 11 const,const 10 Using where diff --git a/mysql-test/r/insert_select.result b/mysql-test/r/insert_select.result index b49ca3a6c2f..c403ff8532d 100644 --- a/mysql-test/r/insert_select.result +++ b/mysql-test/r/insert_select.result @@ -21,8 +21,7 @@ payoutID 20 22 drop table t1,t2; -DROP TABLE IF EXISTS crash1,crash2; -CREATE TABLE `crash1` ( +CREATE TABLE `t1` ( `numeropost` bigint(20) unsigned NOT NULL default '0', `icone` tinyint(4) unsigned NOT NULL default '0', `numreponse` bigint(20) unsigned NOT NULL auto_increment, @@ -37,7 +36,7 @@ KEY `date` (`date`), KEY `pseudo` (`pseudo`), KEY `numreponse` (`numreponse`) ) TYPE=MyISAM; -CREATE TABLE `crash2` ( +CREATE TABLE `t2` ( `numeropost` bigint(20) unsigned NOT NULL default '0', `icone` tinyint(4) unsigned NOT NULL default '0', `numreponse` bigint(20) unsigned NOT NULL auto_increment, @@ -52,22 +51,20 @@ KEY `date` (`date`), KEY `pseudo` (`pseudo`), KEY `numreponse` (`numreponse`) ) TYPE=MyISAM; -INSERT INTO crash2 +INSERT INTO t2 (numeropost,icone,numreponse,contenu,pseudo,date,ip,signature) VALUES (9,1,56,'test','joce','2001-07-25 13:50:53' ,3649052399,0); -INSERT INTO crash1 (numeropost,icone,contenu,pseudo,date,signature,ip) -SELECT 1618,icone,contenu,pseudo,date,signature,ip FROM crash2 +INSERT INTO t1 (numeropost,icone,contenu,pseudo,date,signature,ip) +SELECT 1618,icone,contenu,pseudo,date,signature,ip FROM t2 WHERE numeropost=9 
ORDER BY numreponse ASC; show variables like '%bulk%'; Variable_name Value bulk_insert_buffer_size 8388608 -INSERT INTO crash1 (numeropost,icone,contenu,pseudo,date,signature,ip) -SELECT 1718,icone,contenu,pseudo,date,signature,ip FROM crash2 +INSERT INTO t1 (numeropost,icone,contenu,pseudo,date,signature,ip) +SELECT 1718,icone,contenu,pseudo,date,signature,ip FROM t2 WHERE numeropost=9 ORDER BY numreponse ASC; -DROP TABLE IF EXISTS crash1,crash2; -drop table if exists t1; -drop table if exists t2; +DROP TABLE t1,t2; create table t1(a int, unique(a)); insert into t1 values(2); create table t2(a int); diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result index e063b5c3e02..039b6e1cba3 100644 --- a/mysql-test/r/join.result +++ b/mysql-test/r/join.result @@ -104,9 +104,7 @@ KEY category (category,county,state) INSERT INTO t2 VALUES (3,2,11,12,5400,7800); INSERT INTO t2 VALUES (4,2,25,12,6500,11200); INSERT INTO t2 VALUES (5,1,37,6,10000,12000); -select a.id, b.category as catid, b.state as stateid, b.county as -countyid from t1 a, t2 b where (a.token = -'a71250b7ed780f6ef3185bfffe027983') and (a.count = b.id); +select a.id, b.category as catid, b.state as stateid, b.county as countyid from t1 a, t2 b ignore index (primary) where (a.token ='a71250b7ed780f6ef3185bfffe027983') and (a.count = b.id); id catid stateid countyid 27 2 12 11 28 2 12 11 diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index 81266f6562e..8f3f82201c3 100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ -659,3 +659,11 @@ i i i 1 NULL NULL 2 2 2 drop table t1,t2,t3; +create table t1 (f1 integer,f2 integer,f3 integer); +create table t2 (f2 integer,f4 integer); +create table t3 (f3 integer,f5 integer); +select * from t1 +left outer join t2 using (f2) +left outer join t3 using (f3); +Unknown column 'test.t2.f3' in 'on clause' +drop table t1,t2,t3; diff --git a/mysql-test/r/key_diff.result b/mysql-test/r/key_diff.result index 4eaccc696f9..2d4bc19474f 100644 --- a/mysql-test/r/key_diff.result +++ b/mysql-test/r/key_diff.result @@ -36,7 +36,7 @@ a a a a explain select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B; table type possible_keys key key_len ref rows Extra t1 ALL a NULL NULL NULL 5 -t2 ALL b NULL NULL NULL 5 Using where +t2 ALL b NULL NULL NULL 4 Using where select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B order by binary t1.a,t2.a; a b a b A B a a diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 9a653aff99e..c4368384bf8 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -335,12 +335,12 @@ t1 1 c_2 1 c A 5 NULL NULL YES BTREE t1 1 c_2 2 a A 5 NULL NULL BTREE explain select * from t1,t2 where t1.a=t2.a; table type possible_keys key key_len ref rows Extra -t1 ALL a NULL NULL NULL 5 -t2 ALL a NULL NULL NULL 2 Using where +t2 ALL a NULL NULL NULL 2 +t1 ALL a NULL NULL NULL 4 Using where explain select * from t1,t2 force index(a) where t1.a=t2.a; table type possible_keys key key_len ref rows Extra t2 ALL a NULL NULL NULL 2 -t1 ALL a NULL NULL NULL 5 Using where +t1 ALL a NULL NULL NULL 4 Using where explain select * from t1 force index(a),t2 force index(a) where t1.a=t2.a; table type possible_keys key key_len ref rows Extra t2 ALL a NULL NULL NULL 2 @@ -351,8 +351,8 @@ t2 ALL b NULL NULL NULL 2 t1 ref b b 5 t2.b 1 Using where explain select * from t1,t2 force index(c) where t1.a=t2.a; table type possible_keys key key_len ref rows Extra -t1 ALL a NULL NULL NULL 5 -t2 ALL NULL NULL NULL NULL 2 Using where +t2 
ALL NULL NULL NULL NULL 2 +t1 ALL a NULL NULL NULL 4 Using where explain select * from t1 where a=0 or a=2; table type possible_keys key key_len ref rows Extra t1 ALL a NULL NULL NULL 5 Using where diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index ff2dd93311c..64fac8af872 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -307,17 +307,17 @@ table type possible_keys key key_len ref rows Extra t1 range a a 9 NULL 8 Using where; Using index explain select * from t1 where a = 2 and b >0 order by a desc,b desc; table type possible_keys key key_len ref rows Extra -t1 range a a 9 NULL 5 Using where; Using index +t1 range a a 9 NULL 4 Using where; Using index explain select * from t1 where a = 2 and b is null order by a desc,b desc; table type possible_keys key key_len ref rows Extra t1 ref a a 9 const,const 1 Using where; Using index; Using filesort explain select * from t1 where a = 2 and (b is null or b > 0) order by a desc,b desc; table type possible_keys key key_len ref rows Extra -t1 range a a 9 NULL 6 Using where; Using index +t1 range a a 9 NULL 5 Using where; Using index explain select * from t1 where a = 2 and b > 0 order by a desc,b desc; table type possible_keys key key_len ref rows Extra -t1 range a a 9 NULL 5 Using where; Using index +t1 range a a 9 NULL 4 Using where; Using index explain select * from t1 where a = 2 and b < 2 order by a desc,b desc; table type possible_keys key key_len ref rows Extra t1 range a a 9 NULL 2 Using where; Using index diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result index a4c2533ec1a..811c396ea67 100644 --- a/mysql-test/r/select.result +++ b/mysql-test/r/select.result @@ -2569,16 +2569,46 @@ fld1 fld1 250503 250505 250504 250505 250505 250505 +insert into t2 (fld1, companynr) values (999999,99); select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null; companynr companyname +99 NULL +select count(*) from t2 left join t4 using (companynr) where t4.companynr is not null; +count(*) +1199 explain select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null; table type possible_keys key key_len ref rows Extra -t2 ALL NULL NULL NULL NULL 1199 +t2 ALL NULL NULL NULL NULL 1200 t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using where; Not exists explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr is null; table type possible_keys key key_len ref rows Extra t4 ALL NULL NULL NULL NULL 12 -t2 ALL NULL NULL NULL NULL 1199 Using where; Not exists +t2 ALL NULL NULL NULL NULL 1200 Using where; Not exists +delete from t2 where fld1=999999; +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0; +table type possible_keys key key_len ref rows Extra +t2 ALL NULL NULL NULL NULL 1199 Using where +t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0; +table type possible_keys key key_len ref rows Extra +t2 ALL NULL NULL NULL NULL 1199 Using where +t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0; +table type possible_keys key key_len ref rows Extra +t2 ALL NULL NULL NULL NULL 1199 Using where +t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using where +explain select t2.companynr,companyname 
from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null; +table type possible_keys key key_len ref rows Extra +t4 ALL NULL NULL NULL NULL 12 +t2 ALL NULL NULL NULL NULL 1199 Using where +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0 or t4.companynr > 0; +table type possible_keys key key_len ref rows Extra +t4 ALL PRIMARY NULL NULL NULL 12 +t2 ALL NULL NULL NULL NULL 1199 Using where +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where ifnull(t2.companynr,1)>0; +table type possible_keys key key_len ref rows Extra +t4 ALL NULL NULL NULL NULL 12 +t2 ALL NULL NULL NULL NULL 1199 Using where select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1; companynr companynr 37 36 diff --git a/mysql-test/r/select_safe.result b/mysql-test/r/select_safe.result index 3303f19d9c7..ca5c03bdb50 100644 --- a/mysql-test/r/select_safe.result +++ b/mysql-test/r/select_safe.result @@ -13,7 +13,7 @@ a b 1 test 2 test2 update t1 set b="a" where a=1; -select 1 from t1,t1 as t2,t1 as t3,t1 as t4; +select 1 from t1,t1 as t2,t1 as t3; 1 1 1 @@ -35,18 +35,19 @@ update t1 set b="a" limit 1; update t1 set b="a" where b="b" limit 2; delete from t1 where b="test" limit 1; delete from t1 where a+0=1 limit 2; +alter table t1 add key b (b); SET MAX_JOIN_SIZE=2; SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS; @@max_join_size @@sql_big_selects 2 0 insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"); -SELECT * from t1; +SELECT * from t1 order by a; The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok SET SQL_BIG_SELECTS=1; -SELECT * from t1; +SELECT * from t1 order by a; a b -3 a 2 test2 +3 a 4 a 5 a SET MAX_JOIN_SIZE=2; @@ -55,9 +56,26 @@ The SELECT would examine more rows than MAX_JOIN_SIZE. 
Check your WHERE and use SET MAX_JOIN_SIZE=DEFAULT; SELECT * from t1; a b -3 a 2 test2 +3 a 4 a 5 a +SELECT @@MAX_SEEKS_FOR_KEY; +@@max_seeks_for_key +4294967295 +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"); +explain select * from t1,t1 as t2 where t1.b=t2.b; +table type possible_keys key key_len ref rows Extra +t1 ALL b NULL NULL NULL 21 +t2 ALL b NULL NULL NULL 16 Using where +set MAX_SEEKS_FOR_KEY=1; +explain select * from t1,t1 as t2 where t1.b=t2.b; +table type possible_keys key key_len ref rows Extra +t1 ALL b NULL NULL NULL 21 +t2 ref b b 21 t1.b 6 Using where +SET MAX_SEEKS_FOR_KEY=DEFAULT; drop table t1; SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT; diff --git a/mysql-test/t/convert.test b/mysql-test/t/convert.test new file mode 100644 index 00000000000..f26ef3a8c72 --- /dev/null +++ b/mysql-test/t/convert.test @@ -0,0 +1,11 @@ +# Test of character set conversions + +# Test that SET DEFAULT works + +select @@convert_character_set; +select @@global.convert_character_set; +show variables like "%convert_character_set%"; +SET CHARACTER SET cp1251_koi8; +select @@convert_character_set; +SET CHARACTER SET DEFAULT; +select @@convert_character_set; diff --git a/mysql-test/t/distinct.test b/mysql-test/t/distinct.test index 7f75b6b1687..ecce2409571 100644 --- a/mysql-test/t/distinct.test +++ b/mysql-test/t/distinct.test @@ -60,7 +60,7 @@ drop table t1; # CREATE TABLE t1 (a int(10) unsigned not null primary key,b int(10) unsigned); -INSERT INTO t1 VALUES (1,1),(2,1); +INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1); CREATE TABLE t2 (a int(10) unsigned not null, key (A)); INSERT INTO t2 VALUES (1),(2); CREATE TABLE t3 (a int(10) unsigned, key(A), b text); diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test index 695f891f678..3b51168269d 100644 --- a/mysql-test/t/insert_select.test +++ b/mysql-test/t/insert_select.test @@ -12,13 +12,13 @@ insert into t2 (payoutID) SELECT payoutID+10 FROM t1; insert ignore into t2 (payoutID) SELECT payoutID+10 FROM t1; select * from t2; drop table t1,t2; + # # bug in bulk insert optimization # test case by Fournier Jocelyn <joc@presence-pc.com> # -DROP TABLE IF EXISTS crash1,crash2; -CREATE TABLE `crash1` ( +CREATE TABLE `t1` ( `numeropost` bigint(20) unsigned NOT NULL default '0', `icone` tinyint(4) unsigned NOT NULL default '0', `numreponse` bigint(20) unsigned NOT NULL auto_increment, @@ -34,7 +34,7 @@ CREATE TABLE `crash1` ( KEY `numreponse` (`numreponse`) ) TYPE=MyISAM; -CREATE TABLE `crash2` ( +CREATE TABLE `t2` ( `numeropost` bigint(20) unsigned NOT NULL default '0', `icone` tinyint(4) unsigned NOT NULL default '0', `numreponse` bigint(20) unsigned NOT NULL auto_increment, @@ -50,30 +50,27 @@ CREATE TABLE `crash2` ( KEY `numreponse` (`numreponse`) ) TYPE=MyISAM; -INSERT INTO crash2 +INSERT INTO t2 (numeropost,icone,numreponse,contenu,pseudo,date,ip,signature) VALUES (9,1,56,'test','joce','2001-07-25 13:50:53' ,3649052399,0); -INSERT INTO crash1 (numeropost,icone,contenu,pseudo,date,signature,ip) -SELECT 1618,icone,contenu,pseudo,date,signature,ip FROM crash2 +INSERT INTO t1 (numeropost,icone,contenu,pseudo,date,signature,ip) +SELECT 1618,icone,contenu,pseudo,date,signature,ip FROM t2 WHERE numeropost=9 ORDER BY numreponse ASC; show variables like '%bulk%'; -INSERT INTO crash1 (numeropost,icone,contenu,pseudo,date,signature,ip) -SELECT 
1718,icone,contenu,pseudo,date,signature,ip FROM crash2 +INSERT INTO t1 (numeropost,icone,contenu,pseudo,date,signature,ip) +SELECT 1718,icone,contenu,pseudo,date,signature,ip FROM t2 WHERE numeropost=9 ORDER BY numreponse ASC; -DROP TABLE IF EXISTS crash1,crash2; - +DROP TABLE t1,t2; -# Addendum by Guilhem: # Check if a partly-completed INSERT SELECT in a MyISAM table goes # into the binlog -drop table if exists t1; -drop table if exists t2; + create table t1(a int, unique(a)); insert into t1 values(2); create table t2(a int); diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index 08cc5731723..19e04d2aa7e 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -98,10 +98,7 @@ CREATE TABLE t2 ( INSERT INTO t2 VALUES (3,2,11,12,5400,7800); INSERT INTO t2 VALUES (4,2,25,12,6500,11200); INSERT INTO t2 VALUES (5,1,37,6,10000,12000); - -select a.id, b.category as catid, b.state as stateid, b.county as -countyid from t1 a, t2 b where (a.token = -'a71250b7ed780f6ef3185bfffe027983') and (a.count = b.id); +select a.id, b.category as catid, b.state as stateid, b.county as countyid from t1 a, t2 b ignore index (primary) where (a.token ='a71250b7ed780f6ef3185bfffe027983') and (a.count = b.id); select a.id, b.category as catid, b.state as stateid, b.county as countyid from t1 a, t2 b where (a.token = 'a71250b7ed780f6ef3185bfffe027983') and (a.count = b.id) order by a.id; diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test index e172d54194e..ee7d55d2a4e 100644 --- a/mysql-test/t/join_outer.test +++ b/mysql-test/t/join_outer.test @@ -425,3 +425,15 @@ insert into t2 values(2),(3); insert into t3 values(2),(4); select * from t1 natural left join t2 natural left join t3; drop table t1,t2,t3; + +# +# Test of USING +# +create table t1 (f1 integer,f2 integer,f3 integer); +create table t2 (f2 integer,f4 integer); +create table t3 (f3 integer,f5 integer); +--error 1054 +select * from t1 + left outer join t2 using (f2) + left outer join t3 using (f3); +drop table t1,t2,t3; diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test index de90eeec2e8..c2e451ea7f3 100644 --- a/mysql-test/t/select.test +++ b/mysql-test/t/select.test @@ -1527,10 +1527,24 @@ select t2.fld1,t22.fld1 from t2,t2 t22 where t2.fld1 >= 250501 and t2.fld1 <= 25 # # Test of left join. 
# +insert into t2 (fld1, companynr) values (999999,99); select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null; +select count(*) from t2 left join t4 using (companynr) where t4.companynr is not null; explain select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null; explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr is null; +delete from t2 where fld1=999999; + +# +# Test left join optimization + +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0; +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0; +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0; +# Following can't be optimized +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null; +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0 or t4.companynr > 0; +explain select t2.companynr,companyname from t4 left join t2 using (companynr) where ifnull(t2.companynr,1)>0; # # Joins with forms. diff --git a/mysql-test/t/select_safe.test b/mysql-test/t/select_safe.test index a085cfee29d..206f911d028 100644 --- a/mysql-test/t/select_safe.test +++ b/mysql-test/t/select_safe.test @@ -12,7 +12,7 @@ delete from t1 where a=1; insert into t1 values(1,"test"),(2,"test2"); SELECT SQL_BUFFER_RESULT * from t1; update t1 set b="a" where a=1; -select 1 from t1,t1 as t2,t1 as t3,t1 as t4; +select 1 from t1,t1 as t2,t1 as t3; # The following should give errors: --error 1175 @@ -36,19 +36,31 @@ delete from t1 where a+0=1 limit 2; # Test SQL_BIG_SELECTS +alter table t1 add key b (b); SET MAX_JOIN_SIZE=2; SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS; insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"); --error 1104 -SELECT * from t1; +SELECT * from t1 order by a; SET SQL_BIG_SELECTS=1; -SELECT * from t1; +SELECT * from t1 order by a; SET MAX_JOIN_SIZE=2; --error 1104 SELECT * from t1; SET MAX_JOIN_SIZE=DEFAULT; SELECT * from t1; +# +# Test MAX_SEEKS_FOR_KEY +# +SELECT @@MAX_SEEKS_FOR_KEY; +analyze table t1; +insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"); +explain select * from t1,t1 as t2 where t1.b=t2.b; +set MAX_SEEKS_FOR_KEY=1; +explain select * from t1,t1 as t2 where t1.b=t2.b; +SET MAX_SEEKS_FOR_KEY=DEFAULT; + drop table t1; SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT; diff --git a/mysys/tree.c b/mysys/tree.c index ea5cf79f084..2b5ea717809 100644 --- a/mysys/tree.c +++ b/mysys/tree.c @@ -90,6 +90,11 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit, if (!free_element && size >= 0 && ((uint) size <= sizeof(void*) || ((uint) size & (sizeof(void*)-1)))) { + /* + We know that the data doesn't have to be aligned (like if the key + contains a double), so we can store the data combined with the + TREE_ELEMENT. 
+ */ tree->offset_to_key=sizeof(TREE_ELEMENT); /* Put key after element */ /* Fix allocation size so that we don't lose any memory */ default_alloc_size/=(sizeof(TREE_ELEMENT)+size); diff --git a/sql/handler.h b/sql/handler.h index 56f63d1d521..dfcfa44fbcd 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -184,40 +184,41 @@ class handler :public Sql_alloc { protected: struct st_table *table; /* The table definition */ - uint active_index; public: byte *ref; /* Pointer to current row */ byte *dupp_ref; /* Pointer to dupp row */ - uint ref_length; /* Length of ref (1-8 or the clustered - key length) */ - uint block_size; /* index block size */ - ha_rows records; /* Records i datafilen */ - ha_rows deleted; /* Deleted records */ ulonglong data_file_length; /* Length off data file */ ulonglong max_data_file_length; /* Length off data file */ ulonglong index_file_length; ulonglong max_index_file_length; ulonglong delete_length; /* Free bytes */ ulonglong auto_increment_value; - uint raid_type,raid_chunks; + ha_rows records; /* Records in table */ + ha_rows deleted; /* Deleted records */ ulong raid_chunksize; - uint errkey; /* Last dup key */ - uint sortkey, key_used_on_scan; + ulong mean_rec_length; /* physical reclength */ time_t create_time; /* When table was created */ time_t check_time; time_t update_time; - ulong mean_rec_length; /* physical reclength */ + uint errkey; /* Last dup key */ + uint sortkey, key_used_on_scan; + uint active_index; + /* Length of ref (1-8 or the clustered key length) */ + uint ref_length; + uint block_size; /* index block size */ + uint raid_type,raid_chunks; FT_INFO *ft_handler; bool auto_increment_column_changed; - handler(TABLE *table_arg) : table(table_arg),active_index(MAX_REF_PARTS), - ref(0),ref_length(sizeof(my_off_t)), block_size(0),records(0),deleted(0), - data_file_length(0), max_data_file_length(0), index_file_length(0), - delete_length(0), auto_increment_value(0), raid_type(0), - key_used_on_scan(MAX_KEY), - create_time(0), check_time(0), update_time(0), mean_rec_length(0), - ft_handler(0) + handler(TABLE *table_arg) :table(table_arg), + ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0), + delete_length(0), auto_increment_value(0), + records(0), deleted(0), mean_rec_length(0), + create_time(0), check_time(0), update_time(0), + key_used_on_scan(MAX_KEY), active_index(MAX_REF_PARTS), + ref_length(sizeof(my_off_t)), block_size(0), + raid_type(0), ft_handler(0) {} virtual ~handler(void) {} int ha_open(const char *name, int mode, int test_if_locked); @@ -227,7 +228,7 @@ public: uint get_dup_key(int error); void change_table_ptr(TABLE *table_arg) { table=table_arg; } virtual double scan_time() - { return ulonglong2double(data_file_length) / IO_SIZE + 1; } + { return ulonglong2double(data_file_length) / IO_SIZE + 2; } virtual double read_time(uint index, uint ranges, ha_rows rows) { return rows2double(ranges+rows); } virtual bool fast_key_read() { return 0;} diff --git a/sql/item.h b/sql/item.h index 09d428509d0..1631bf76135 100644 --- a/sql/item.h +++ b/sql/item.h @@ -71,7 +71,24 @@ public: virtual double val_result() { return val(); } virtual longlong val_int_result() { return val_int(); } virtual String *str_result(String* tmp) { return val_str(tmp); } + /* bit map of tables used by item */ virtual table_map used_tables() const { return (table_map) 0L; } + /* + Return table map of tables that can't be NULL tables (tables that are + used in a context where if they would contain a NULL row generated + by a LEFT or RIGHT join, the 
item would not be true). + This expression is used on WHERE item to determinate if a LEFT JOIN can be + converted to a normal join. + Generally this function should return used_tables() if the function + would return null if any of the arguments are null + As this is only used in the beginning of optimization, the value don't + have to be updated in update_used_tables() + */ + virtual table_map not_null_tables() const { return used_tables(); } + /* + Returns true if this is a simple constant item like an integer, not + a constant expression + */ virtual bool basic_const_item() const { return 0; } virtual Item *new_item() { return 0; } /* Only for const items */ virtual cond_result eq_cmp_result() const { return COND_OK; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 3344f2bc01d..78a63d84c61 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -292,10 +292,12 @@ void Item_func_interval::fix_length_and_dec() } } maybe_null=0; max_length=2; - used_tables_cache|=item->used_tables(); + used_tables_cache|= item->used_tables(); + not_null_tables_cache&= item->not_null_tables(); with_sum_func= with_sum_func || item->with_sum_func; } + void Item_func_interval::split_sum_func(List<Item> &fields) { if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) @@ -1073,8 +1075,9 @@ void Item_func_in::fix_length_and_dec() } maybe_null= item->maybe_null; max_length=2; - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); + used_tables_cache|= item->used_tables(); + not_null_tables_cache&= item->not_null_tables(); + const_item_cache&= item->const_item(); } @@ -1172,16 +1175,23 @@ Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) List_iterator<Item> li(list); Item *item; char buff[sizeof(char*)]; // Max local vars in function - used_tables_cache=0; - const_item_cache=0; + not_null_tables_cache= used_tables_cache= 0; + const_item_cache= 0; + /* + and_table_cache is the value that Item_cond_or() returns for + not_null_tables() + */ + and_tables_cache= ~(table_map) 0; if (thd && check_stack_overrun(thd,buff)) return 0; // Fatal error flag is set! 
while ((item=li++)) { + table_map tmp_table_map; while (item->type() == Item::COND_ITEM && ((Item_cond*) item)->functype() == functype()) { // Identical function + li.replace(((Item_cond*) item)->list); ((Item_cond*) item)->list.empty(); #ifdef DELETE_ITEMS @@ -1193,9 +1203,12 @@ Item_cond::fix_fields(THD *thd,TABLE_LIST *tables) item->top_level_item(); if (item->fix_fields(thd,tables)) return 1; /* purecov: inspected */ - used_tables_cache|=item->used_tables(); - with_sum_func= with_sum_func || item->with_sum_func; - const_item_cache&=item->const_item(); + used_tables_cache|= item->used_tables(); + tmp_table_map= item->not_null_tables(); + not_null_tables_cache|= tmp_table_map; + and_tables_cache&= tmp_table_map; + const_item_cache&= item->const_item(); + with_sum_func= with_sum_func || item->with_sum_func; if (item->maybe_null) maybe_null=1; } @@ -1234,17 +1247,19 @@ Item_cond::used_tables() const return used_tables_cache; } + void Item_cond::update_used_tables() { - used_tables_cache=0; - const_item_cache=1; List_iterator_fast<Item> li(list); Item *item; + + used_tables_cache=0; + const_item_cache=1; while ((item=li++)) { item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&= item->const_item(); + used_tables_cache|= item->used_tables(); + const_item_cache&= item->const_item(); } } @@ -1348,12 +1363,16 @@ Item *and_expressions(Item *a, Item *b, Item **org_item) { Item_cond *res; if ((res= new Item_cond_and(a, (Item*) b))) + { res->used_tables_cache= a->used_tables() | b->used_tables(); + res->not_null_tables_cache= a->not_null_tables() | b->not_null_tables(); + } return res; } if (((Item_cond_and*) a)->add((Item*) b)) return 0; ((Item_cond_and*) a)->used_tables_cache|= b->used_tables(); + ((Item_cond_and*) a)->not_null_tables_cache|= b->not_null_tables(); return a; } @@ -1489,6 +1508,8 @@ Item_func_regex::fix_fields(THD *thd,TABLE_LIST *tables) max_length=1; decimals=0; binary=args[0]->binary || args[1]->binary; used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); + not_null_tables_cache= (args[0]->not_null_tables() | + args[1]->not_null_tables()); const_item_cache=args[0]->const_item() && args[1]->const_item(); if (!regex_compiled && args[1]->const_item()) { diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index f7ade97940c..536ac9dc3d4 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -204,7 +204,7 @@ public: enum Item_result result_type () const { return cached_result_type; } void fix_length_and_dec(); const char *func_name() const { return "ifnull"; } - unsigned int size_of() { return sizeof(*this);} + table_map not_null_tables() const { return 0; } }; @@ -224,7 +224,7 @@ public: } void fix_length_and_dec(); const char *func_name() const { return "if"; } - unsigned int size_of() { return sizeof(*this);} + table_map not_null_tables() const { return 0; } }; @@ -239,7 +239,7 @@ public: enum Item_result result_type () const { return cached_result_type; } void fix_length_and_dec(); const char *func_name() const { return "nullif"; } - unsigned int size_of() { return sizeof(*this);} + table_map not_null_tables() const { return 0; } }; @@ -254,9 +254,10 @@ public: void fix_length_and_dec(); enum Item_result result_type () const { return cached_result_type; } const char *func_name() const { return "coalesce"; } - unsigned int size_of() { return sizeof(*this);} + table_map not_null_tables() const { return 0; } }; + class Item_func_case :public Item_func { Item * first_expr, *else_expr; @@ -270,6 +271,7 @@ public: String 
*val_str(String *); void fix_length_and_dec(); void update_used_tables(); + table_map not_null_tables() const { return 0; } enum Item_result result_type () const { return cached_result_type; } const char *func_name() const { return "case"; } void print(String *str); @@ -479,10 +481,12 @@ public: } } } + table_map not_null_tables() const { return 0; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } unsigned int size_of() { return sizeof(*this);} }; + class Item_func_isnotnull :public Item_bool_func { public: @@ -495,9 +499,10 @@ public: } const char *func_name() const { return "isnotnull"; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } - unsigned int size_of() { return sizeof(*this);} + table_map not_null_tables() const { return 0; } }; + class Item_func_like :public Item_bool_func2 { char escape; @@ -572,6 +577,8 @@ class Item_cond :public Item_bool_func protected: List<Item> list; bool abort_on_null; + table_map and_tables_cache; + public: /* Item_cond() is only used to create top level items */ Item_cond() : Item_bool_func(), abort_on_null(1) { const_item_cache=0; } @@ -611,6 +618,7 @@ public: enum Functype functype() const { return COND_OR_FUNC; } longlong val_int(); const char *func_name() const { return "or"; } + table_map not_null_tables() const { return and_tables_cache; } }; diff --git a/sql/item_func.cc b/sql/item_func.cc index e847b203006..23bec0c3c81 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -61,7 +61,7 @@ Item_func::fix_fields(THD *thd,TABLE_LIST *tables) Item **arg,**arg_end; char buff[STACK_BUFF_ALLOC]; // Max argument in function binary=0; - used_tables_cache=0; + used_tables_cache= not_null_tables_cache= 0; const_item_cache=1; if (thd && check_stack_overrun(thd,buff)) @@ -78,8 +78,9 @@ Item_func::fix_fields(THD *thd,TABLE_LIST *tables) if (item->binary) binary=1; with_sum_func= with_sum_func || item->with_sum_func; - used_tables_cache|=item->used_tables(); - const_item_cache&= item->const_item(); + used_tables_cache|= item->used_tables(); + not_null_tables_cache|= item->not_null_tables(); + const_item_cache&= item->const_item(); } } fix_length_and_dec(); @@ -122,6 +123,13 @@ table_map Item_func::used_tables() const return used_tables_cache; } + +table_map Item_func::not_null_tables() const +{ + return not_null_tables_cache; +} + + void Item_func::print(String *str) { str->append(func_name()); diff --git a/sql/item_func.h b/sql/item_func.h index 68e5335dc7e..8a4cace0b87 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -34,7 +34,7 @@ protected: Item **args,*tmp_arg[2]; public: uint arg_count; - table_map used_tables_cache; + table_map used_tables_cache, not_null_tables_cache; bool const_item_cache; enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, @@ -97,6 +97,7 @@ public: bool fix_fields(THD *,struct st_table_list *); void make_field(Send_field *field); table_map used_tables() const; + table_map not_null_tables() const; void update_used_tables(); bool eq(const Item *item, bool binary_cmp) const; virtual optimize_type select_optimize() const { return OPTIMIZE_NONE; } @@ -588,7 +589,8 @@ public: void split_sum_func(List<Item> &fields); void update_used_tables() { - item->update_used_tables() ; Item_func::update_used_tables(); + item->update_used_tables(); + Item_func::update_used_tables(); used_tables_cache|= item->used_tables(); const_item_cache&= item->const_item(); } @@ -597,6 +599,7 @@ public: { maybe_null=0; max_length=3; used_tables_cache|= item->used_tables(); + 
not_null_tables_cache&= item->not_null_tables(); const_item_cache&= item->const_item(); with_sum_func= with_sum_func || item->with_sum_func; } @@ -736,6 +739,7 @@ public: return res; } Item_result result_type () const { return udf.result_type(); } + table_map not_null_tables() const { return 0; } unsigned int size_of() { return sizeof(*this);} }; @@ -969,6 +973,7 @@ public: } enum Functype functype() const { return FT_FUNC; } void update_used_tables() {} + table_map not_null_tables() const { return 0; } bool fix_fields(THD *thd,struct st_table_list *tlist); bool eq(const Item *, bool binary_cmp) const; longlong val_int() { return val()!=0.0; } diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 9d4f7641b1d..995d413626c 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -606,9 +606,10 @@ void Item_func_concat_ws::fix_length_and_dec() max_length=MAX_BLOB_WIDTH; maybe_null=1; } - used_tables_cache|=separator->used_tables(); - const_item_cache&=separator->const_item(); - with_sum_func= with_sum_func || separator->with_sum_func; + used_tables_cache|= separator->used_tables(); + not_null_tables_cache&= separator->not_null_tables(); + const_item_cache&= separator->const_item(); + with_sum_func= with_sum_func || separator->with_sum_func; } void Item_func_concat_ws::update_used_tables() @@ -1509,8 +1510,9 @@ void Item_func_elt::fix_length_and_dec() } maybe_null=1; // NULL if wrong first arg with_sum_func= with_sum_func || item->with_sum_func; - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); + used_tables_cache|= item->used_tables(); + not_null_tables_cache&= item->not_null_tables(); + const_item_cache&= item->const_item(); } @@ -1600,8 +1602,9 @@ void Item_func_make_set::fix_length_and_dec() max_length=arg_count-1; for (uint i=1 ; i < arg_count ; i++) max_length+=args[i]->max_length; - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); + used_tables_cache|= item->used_tables(); + not_null_tables_cache&= item->not_null_tables(); + const_item_cache&= item->const_item(); with_sum_func= with_sum_func || item->with_sum_func; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5613f1eeb07..f27ba435729 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3154,7 +3154,7 @@ enum options { OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE, OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS, OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE, - OPT_MAX_JOIN_SIZE, OPT_MAX_SORT_LENGTH, + OPT_MAX_JOIN_SIZE, OPT_MAX_SORT_LENGTH, OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS, OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE, OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, @@ -3832,6 +3832,11 @@ replicating a LOAD DATA INFILE command", (gptr*) &global_system_variables.max_join_size, (gptr*) &max_system_variables.max_join_size, 0, GET_HA_ROWS, REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + { "max_seeks_for_key", OPT_MAX_SEEKS_FOR_KEY, + "Limit assumed max number of seeks when looking up rows based on a key", + (gptr*) &global_system_variables.max_seeks_for_key, + (gptr*) &max_system_variables.max_seeks_for_key, 0, GET_ULONG, + REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0 }, {"max_sort_length", OPT_MAX_SORT_LENGTH, "The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored).", (gptr*) &global_system_variables.max_sort_length, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index a8bc559e3a0..13f786e0e75 100644 --- a/sql/net_serv.cc +++ 
b/sql/net_serv.cc @@ -21,6 +21,9 @@ Read packets are reallocated dynamicly when reading big packets. Each logical packet has the following pre-info: 3 byte length & 1 byte package-number. + + This file needs to be written in C as it's used by the libmysql client as a + C file. */ #ifdef __WIN__ diff --git a/sql/set_var.cc b/sql/set_var.cc index 32603ec51d9..b66c410c6d5 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -154,6 +154,8 @@ sys_var_thd_ulong sys_max_heap_table_size("max_heap_table_size", sys_var_thd_ha_rows sys_max_join_size("max_join_size", &SV::max_join_size, fix_max_join_size); +sys_var_thd_ulong sys_max_seeks_for_key("max_seeks_for_key", + &SV::max_seeks_for_key); #ifndef TO_BE_DELETED /* Alias for max_join_size */ sys_var_thd_ha_rows sys_sql_max_join_size("sql_max_join_size", &SV::max_join_size, @@ -348,6 +350,7 @@ sys_var *sys_variables[]= &sys_max_delayed_threads, &sys_max_heap_table_size, &sys_max_join_size, + &sys_max_seeks_for_key, &sys_max_sort_length, &sys_max_tmp_tables, &sys_max_user_connections, @@ -492,6 +495,7 @@ struct show_var_st init_vars[]= { {sys_max_delayed_threads.name,(char*) &sys_max_delayed_threads, SHOW_SYS}, {sys_max_heap_table_size.name,(char*) &sys_max_heap_table_size, SHOW_SYS}, {sys_max_join_size.name, (char*) &sys_max_join_size, SHOW_SYS}, + {sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS}, {sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS}, {sys_max_user_connections.name,(char*) &sys_max_user_connections, SHOW_SYS}, {sys_max_tmp_tables.name, (char*) &sys_max_tmp_tables, SHOW_SYS}, @@ -1079,6 +1083,11 @@ byte *sys_var_thd_conv_charset::value_ptr(THD *thd, enum_var_type type) } +void sys_var_thd_conv_charset::set_default(THD *thd, enum_var_type type) +{ + thd->variables.convert_set= global_system_variables.convert_set; +} + bool sys_var_timestamp::update(THD *thd, set_var *var) { diff --git a/sql/set_var.h b/sql/set_var.h index f33f53c5acc..e22c55276a7 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -412,6 +412,7 @@ public: return type != STRING_RESULT; /* Only accept strings */ } bool check_default(enum_var_type type) { return 0; } + void set_default(THD *thd, enum_var_type type); }; diff --git a/sql/slave.cc b/sql/slave.cc index fe668900da0..851c6ba4f02 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -2146,8 +2146,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) DBUG_ASSERT(rli->sql_thd==thd); if (sql_slave_killed(thd,rli)) { - /* do not forget to free ev ! 
*/ - if (ev) delete ev; + delete ev; return 1; } if (ev) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 3bfd5e14d43..43718e5d93b 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1903,11 +1903,11 @@ bool setup_tables(TABLE_LIST *tables) table->used_fields=0; table->const_table=0; - table->outer_join=table->null_row=0; + table->null_row=0; table->status=STATUS_NO_RECORD; table->keys_in_use_for_query= table->keys_in_use; table->used_keys= table->keys_for_keyread; - table->maybe_null=test(table->outer_join=table_list->outer_join); + table->maybe_null=test(table->outer_join= table_list->outer_join); table->tablenr=tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; @@ -2027,6 +2027,7 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) { + table_map not_null_tables= 0; DBUG_ENTER("setup_conds"); thd->set_query_id=1; thd->cond_count=0; @@ -2036,6 +2037,7 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) thd->where="where clause"; if ((*conds)->fix_fields(thd,tables)) DBUG_RETURN(1); + not_null_tables= (*conds)->not_null_tables(); } /* Check if we are using outer joins */ @@ -2049,9 +2051,15 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) DBUG_RETURN(1); thd->cond_count++; - /* If it's a normal join, add the ON/USING expression to the WHERE */ - if (!table->outer_join) + /* + If it's a normal join or a LEFT JOIN which can be optimized away + add the ON/USING expression to the WHERE + */ + if (!table->outer_join || + ((table->table->map & not_null_tables) && + !(specialflag & SPECIAL_NO_NEW_FUNC))) { + table->outer_join= 0; if (!(*conds=and_conds(*conds, table->on_expr))) DBUG_RETURN(1); table->on_expr=0; diff --git a/sql/sql_class.h b/sql/sql_class.h index a8a24451ecc..d1b2ef82ccb 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -310,6 +310,7 @@ struct system_variables ulong tx_isolation; ulong table_type; ulong default_week_format; + ulong max_seeks_for_key; my_bool log_warnings; my_bool low_priority_updates; @@ -781,11 +782,12 @@ class Unique :public Sql_alloc TREE tree; byte *record_pointers; bool flush(); + uint size; public: ulong elements; Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, - uint size, ulong max_in_memory_size_arg); + uint size_arg, ulong max_in_memory_size_arg); ~Unique(); inline bool unique_add(gptr ptr) { @@ -800,26 +802,27 @@ public: friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique); }; - class multi_delete : public select_result { - TABLE_LIST *delete_tables, *table_being_deleted; - Unique **tempfiles; - THD *thd; - ha_rows deleted; - uint num_of_tables; - int error; - bool do_delete, transactional_tables, log_delayed, normal_tables; - public: - multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables); - ~multi_delete(); - int prepare(List<Item> &list); - bool send_fields(List<Item> &list, +class multi_delete : public select_result +{ + TABLE_LIST *delete_tables, *table_being_deleted; + Unique **tempfiles; + THD *thd; + ha_rows deleted; + uint num_of_tables; + int error; + bool do_delete, transactional_tables, log_delayed, normal_tables; +public: + multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables); + ~multi_delete(); + int prepare(List<Item> &list); + bool send_fields(List<Item> &list, uint flag) { return 0; } - bool send_data(List<Item> &items); - bool initialize_tables (JOIN *join); - void send_error(uint errcode,const char *err); - int do_deletes (bool from_send_error); 
- bool send_eof(); - }; + bool send_data(List<Item> &items); + bool initialize_tables (JOIN *join); + void send_error(uint errcode,const char *err); + int do_deletes (bool from_send_error); + bool send_eof(); +}; class multi_update : public select_result { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e03b007f8b7..9c31064cd27 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1274,14 +1274,14 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, /* Set a max range of how many seeks we can expect when using keys - This was (s->read_time*5), but this was too low with small rows + This is can't be to high as otherwise we are likely to use + table scan. */ - s->worst_seeks= (double) s->found_records / 5; + s->worst_seeks= min((double) s->found_records / 10, + (double) s->read_time*3); if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; - /* if (s->type == JT_EQ_REF) - continue; */ if (s->const_keys) { ha_rows records; @@ -1887,6 +1887,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, best=best_time=records=DBL_MAX; KEYUSE *best_key=0; uint best_max_key_part=0; + my_bool found_constrain= 0; if (s->keyuse) { /* Use key if possible */ @@ -1967,6 +1968,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, } else { + found_constrain= 1; /* Check if we found full key */ @@ -2003,16 +2005,18 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, records=2.0; // Can't be as good as a unique } } + /* Limit the number of matched rows */ + tmp= records; + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->used_keys & ((key_map) 1 << key)) { /* we can use only index tree */ uint keys_per_block= table->file->block_size/2/ (keyinfo->key_length+table->file->ref_length)+1; - tmp=(record_count*(records+keys_per_block-1)/ - keys_per_block); + tmp=record_count*(tmp+keys_per_block-1)/keys_per_block; } else - tmp=record_count*min(records,s->worst_seeks); + tmp=record_count*min(tmp,s->worst_seeks); } } else @@ -2042,7 +2046,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { /* Assume that the first key part matches 1% of the file - and that the hole key matches 10 (dupplicates) or 1 + and that the hole key matches 10 (duplicates) or 1 (unique) records. Assume also that more key matches proportionally more records @@ -2074,6 +2078,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, records=(ulong) tmp; } } + /* Limit the number of matched rows */ + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->used_keys & ((key_map) 1 << key)) { /* we can use only index tree */ @@ -2116,20 +2122,31 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, s->table->used_keys && best_key) && !(s->table->force_index && best_key)) { // Check full join + ha_rows rnd_records= s->found_records; if (s->on_expr) { - tmp=rows2double(s->found_records); // Can't use read cache + tmp=rows2double(rnd_records); // Can't use read cache } else { tmp=(double) s->read_time; - /* Calculate time to read through cache */ + /* Calculate time to read previous rows through cache */ tmp*=(1.0+floor((double) cache_record_length(join,idx)* record_count / (double) thd->variables.join_buff_size)); } + + /* + If there is a restriction on the table, assume that 25% of the + rows can be skipped on next part. 
+ This is to force tables that this table depends on before this + table + */ + if (found_constrain) + rnd_records-= rnd_records/4; + if (best == DBL_MAX || - (tmp + record_count/(double) TIME_FOR_COMPARE*s->found_records < + (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records < best + record_count/(double) TIME_FOR_COMPARE*records)) { /* @@ -2137,7 +2154,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, will ensure that this will be used */ best=tmp; - records= rows2double(s->found_records); + records= rows2double(rnd_records); best_key=0; } } diff --git a/sql/uniques.cc b/sql/uniques.cc index ed256a4b791..d00893a8605 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -49,8 +49,8 @@ int unique_write_to_ptrs(gptr key, element_count count, Unique *unique) } Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, - uint size, ulong max_in_memory_size_arg) - :max_in_memory_size(max_in_memory_size_arg),elements(0) + uint size_arg, ulong max_in_memory_size_arg) + :max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0) { my_b_clear(&file); init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, @@ -101,7 +101,7 @@ bool Unique::get(TABLE *table) { /* Whole tree is in memory; Don't use disk if you don't need to */ if ((record_pointers=table->record_pointers= (byte*) - my_malloc(tree.size_of_element * tree.elements_in_tree, MYF(0)))) + my_malloc(size * tree.elements_in_tree, MYF(0)))) { (void) tree_walk(&tree, (tree_walk_action) unique_write_to_ptrs, this, left_root_right); |
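
The functional core of this merge is the new Item::not_null_tables() table map: setup_conds() in sql/sql_base.cc now uses it to turn a LEFT JOIN into a plain join whenever the WHERE clause can never be true for a NULL-extended row of the inner table. The queries below are taken from the new mysql-test/t/select.test cases and only sketch the intent; the exact EXPLAIN plans depend on the data the test suite loads into t2 and t4.

# A predicate on the inner table that rejects NULL lets the optimizer
# drop the outer-join property and reorder the tables freely:
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;

# Predicates that can hold for NULL (IS NULL, IFNULL()) block the
# conversion, so t4 stays first and t2 keeps its outer-join handling:
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;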
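The merge also adds a max_seeks_for_key server variable (sql/mysqld.cc, sql/set_var.cc) that caps the number of key seeks the optimizer charges for a keyed lookup in find_best(), so lowering it biases plans toward index access instead of table scans. A minimal sketch of the usage exercised by the new select_safe.test case, assuming a table t1 with a non-unique key on column b as created there:

# With the default (very large) limit the optimizer may still pick a
# table scan for the second instance of t1:
explain select * from t1,t1 as t2 where t1.b=t2.b;
# Capping the assumed seek count makes the ref access on key b win:
set max_seeks_for_key=1;
explain select * from t1,t1 as t2 where t1.b=t2.b;
set max_seeks_for_key=DEFAULT;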
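Finally, the new mysql-test/t/convert.test covers the sys_var_thd_conv_charset::set_default() hook added in sql/set_var.cc, which lets SET CHARACTER SET DEFAULT reset the session conversion set back to the global one. In outline, mirroring convert.result:

select @@convert_character_set;
# empty until a conversion set is chosen
SET CHARACTER SET cp1251_koi8;
select @@convert_character_set;
# now reports cp1251_koi8
SET CHARACTER SET DEFAULT;
select @@convert_character_set;
# back to the (empty) global default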